Skip to content

Commit 0581df6

Browse files
use the external llama.cpp b6188
1 parent d8cf8f8 commit 0581df6

File tree

3 files changed

+11
-61
lines changed

3 files changed

+11
-61
lines changed

recipe/0002-Fix-ARM-SVE-detection-for-old-kernel-headers.patch

Lines changed: 0 additions & 32 deletions
This file was deleted.

recipe/bld.bat

Lines changed: 2 additions & 17 deletions
Original file line number · Diff line number · Diff line change
@@ -1,22 +1,7 @@
1-
:: Set CMake arguments for vendored llama.cpp build
2-
set CMAKE_ARGS=%CMAKE_ARGS% -DLLAMA_BUILD=ON
1+
:: Set CMake arguments to use external llama.cpp library
2+
set CMAKE_ARGS=%CMAKE_ARGS% -DLLAMA_BUILD=OFF
33
set CMAKE_ARGS=%CMAKE_ARGS% -DLLAVA_BUILD=OFF
44

55
:: Install the package
66
%PYTHON% -m pip install . -vv --no-deps --no-build-isolation
77
if errorlevel 1 exit 1
8-
9-
:: Move DLLs from site-packages/bin to Library/bin (standard conda location)
10-
:: This matches the expectation in the patched llama_cpp.py
11-
if not exist %LIBRARY_BIN% mkdir %LIBRARY_BIN%
12-
13-
if exist %SP_DIR%\bin\*.dll (
14-
move %SP_DIR%\bin\*.dll %LIBRARY_BIN%\
15-
if errorlevel 1 exit 1
16-
)
17-
18-
:: Also move DLLs from site-packages/llama_cpp/lib to Library/bin
19-
if exist %SP_DIR%\llama_cpp\lib\*.dll (
20-
move %SP_DIR%\llama_cpp\lib\*.dll %LIBRARY_BIN%\
21-
if errorlevel 1 exit 1
22-
)

recipe/meta.yaml

Lines changed: 9 additions & 12 deletions
Original file line number · Diff line number · Diff line change
@@ -1,8 +1,8 @@
11
{% set name = "llama-cpp-python" %}
22
{% set version = "0.3.16" %}
3-
# Building vendored llama.cpp (commit 4227c9b) instead of using external dependency
4-
# because llama-cpp-python 0.3.16 requires llama.cpp >= b6173, but the available
5-
# llama.cpp b6872 has breaking API changes (removed llama_get_kv_self function)
3+
# Using external llama.cpp b6188 package (version constraint: >=0.0.6188,<0.0.6239)
4+
# llama-cpp-python 0.3.16 requires llama.cpp with llama_get_kv_self() API,
5+
# which was removed in b6239 (PR #15472)
66

77
package:
88
name: {{ name|lower }}
@@ -12,10 +12,8 @@ source:
1212
url: https://pypi.io/packages/source/{{ name[0] }}/{{ name }}/llama_cpp_python-{{ version }}.tar.gz
1313
sha256: 34ed0f9bd9431af045bb63d9324ae620ad0536653740e9bb163a2e1fcb973be6
1414
patches:
15-
# Adapt shared library relocation for conda environments
15+
# Adapt shared library relocation for conda environments (find system llama.cpp)
1616
- 0001-Adapt-shared-library-relocation.patch
17-
# Fix ARM SVE detection for old kernel headers on aarch64
18-
- 0002-Fix-ARM-SVE-detection-for-old-kernel-headers.patch # [linux and aarch64]
1917

2018
build:
2119
number: 0
@@ -24,7 +22,7 @@ build:
2422
- $RPATH/ggml-cpu.dll # [win]
2523
- $RPATH/ggml.dll # [win]
2624
script: # [unix]
27-
- export CMAKE_ARGS="${CMAKE_ARGS} -DLLAMA_BUILD=ON" # [unix]
25+
- export CMAKE_ARGS="${CMAKE_ARGS} -DLLAMA_BUILD=OFF" # [unix]
2826
- export CMAKE_ARGS="${CMAKE_ARGS} -DLLAVA_BUILD=OFF" # [unix]
2927
- {{ PYTHON }} -m pip install . -vv --no-deps --no-build-isolation # [unix]
3028
requirements:
@@ -42,6 +40,7 @@ requirements:
4240
- python
4341
- scikit-build-core >=0.5.1
4442
- pip
43+
- llama.cpp >=0.0.6188,<0.0.6239
4544
- llvm-openmp # [osx]
4645

4746
run:
@@ -51,10 +50,8 @@ requirements:
5150
- diskcache >=5.6.1
5251
- jinja2 >=2.11.3
5352

54-
# Note: No external llama.cpp dependency - using vendored version (commit 4227c9b)
55-
# Vendored llama.cpp requires OpenMP
56-
- _openmp_mutex # [linux]
57-
- llvm-openmp # [osx]
53+
# External llama.cpp package (b6188 - last version with llama_get_kv_self API)
54+
- llama.cpp >=0.0.6188,<0.0.6239
5855

5956
# Split into llama-cpp-python-server
6057
- uvicorn >=0.22.0
@@ -77,7 +74,7 @@ about:
7774
description: |
7875
Python bindings for llama.cpp, providing a simple Python interface for
7976
inference with Large Language Models (LLMs) using the llama.cpp backend.
80-
Supports CPU and GPU acceleration with vendored llama.cpp library.
77+
Supports CPU and GPU acceleration with external llama.cpp library.
8178
dev_url: https://github.com/abetlen/llama-cpp-python
8279
doc_url: https://llama-cpp-python.readthedocs.io
8380
license: MIT

0 commit comments

Comments (0)