Changes from all commits (26 commits)
de67658  Update llama-cpp-python to 0.3.16 (xkong-anaconda, Oct 20, 2025)
937bb68  update (xkong-anaconda, Nov 9, 2025)
bed88cc  Fix Windows build: exclude make from Windows builds (xkong-anaconda, Nov 9, 2025)
17be2fd  Build vendored llama.cpp instead of using external dependency (xkong-anaconda, Nov 10, 2025)
b4d95df  Disable tool building, only build libraries (xkong-anaconda, Nov 10, 2025)
2a2765a  Fix overlinking error (xkong-anaconda, Nov 10, 2025)
9980451  new patch (xkong-anaconda, Nov 11, 2025)
6ae239b  added llvm-openmp to the host requirements (xkong-anaconda, Nov 11, 2025)
b5d0bd2  Added missing_dso_whitelist for win (xkong-anaconda, Nov 11, 2025)
cf55c44  Created bld.bat to relocate DLLs (xkong-anaconda, Nov 11, 2025)
aeef243  linter fix (xkong-anaconda, Nov 11, 2025)
d8cf8f8  Address PR comment (xkong-anaconda, Nov 12, 2025)
0581df6  use the external llama.cpp b6188 (xkong-anaconda, Dec 14, 2025)
1c8ea2d  Update recipe/meta.yaml (xkong-anaconda, Dec 17, 2025)
64080e3  Update recipe/meta.yaml (xkong-anaconda, Dec 17, 2025)
b249de5  Update recipe/meta.yaml (xkong-anaconda, Dec 17, 2025)
b4a4b89  Update recipe/meta.yaml (xkong-anaconda, Dec 17, 2025)
3fe5f24  Update recipe/meta.yaml (xkong-anaconda, Dec 17, 2025)
a59813e  Update recipe/meta.yaml (xkong-anaconda, Dec 17, 2025)
3a68363  Update recipe/meta.yaml (xkong-anaconda, Dec 17, 2025)
4366976  Update recipe/meta.yaml (xkong-anaconda, Dec 17, 2025)
66d81c0  Update recipe/meta.yaml (xkong-anaconda, Dec 17, 2025)
be75f42  Update recipe/meta.yaml (xkong-anaconda, Dec 17, 2025)
2fa7548  Fix missing [unix] selector on pip install line (xkong-anaconda, Dec 17, 2025)
58a6f37  fix (xkong-anaconda, Dec 17, 2025)
c5d3e1e  Add cmake to host requirements (xkong-anaconda, Dec 17, 2025)
91 changes: 91 additions & 0 deletions recipe/0001-Adapt-shared-library-relocation.patch

@@ -0,0 +1,91 @@
From 8156a3728b89cbb944abf5af8376100da8832965 Mon Sep 17 00:00:00 2001
From: Julien Jerphanion <[email protected]>
Date: Fri, 22 Aug 2025 10:22:47 +0200
Subject: [PATCH] Adapt shared library relocation

Signed-off-by: Julien Jerphanion <[email protected]>
---
 llama_cpp/_ctypes_extensions.py | 11 +++++++++--
 llama_cpp/llama_cpp.py          | 13 +++++++++++++
 2 files changed, 22 insertions(+), 2 deletions(-)

diff --git a/llama_cpp/_ctypes_extensions.py b/llama_cpp/_ctypes_extensions.py
index e88ed38..0acd159 100644
--- a/llama_cpp/_ctypes_extensions.py
+++ b/llama_cpp/_ctypes_extensions.py
@@ -29,16 +29,21 @@ def load_shared_library(lib_base_name: str, base_path: pathlib.Path):
     if sys.platform.startswith("linux") or sys.platform.startswith("freebsd"):
         lib_paths += [
             base_path / f"lib{lib_base_name}.so",
+            f"lib{lib_base_name}.so",
         ]
     elif sys.platform == "darwin":
         lib_paths += [
             base_path / f"lib{lib_base_name}.so",
             base_path / f"lib{lib_base_name}.dylib",
+            f"{lib_base_name}.so",
+            f"lib{lib_base_name}.dylib",
         ]
     elif sys.platform == "win32":
         lib_paths += [
             base_path / f"{lib_base_name}.dll",
             base_path / f"lib{lib_base_name}.dll",
+            f"{lib_base_name}.dll",
+            f"lib{lib_base_name}.dll",
         ]
     else:
         raise RuntimeError("Unsupported platform")
@@ -62,14 +67,16 @@ def load_shared_library(lib_base_name: str, base_path: pathlib.Path):

     # Try to load the shared library, handling potential errors
     for lib_path in lib_paths:
-        if lib_path.exists():
+        if isinstance(lib_path, str) or lib_path.exists():
             try:
                 return ctypes.CDLL(str(lib_path), **cdll_args)  # type: ignore
+            except OSError:
+                pass
             except Exception as e:
                 raise RuntimeError(f"Failed to load shared library '{lib_path}': {e}")

     raise FileNotFoundError(
-        f"Shared library with base name '{lib_base_name}' not found"
+        f"Shared library with base name '{lib_base_name}' not found in {lib_paths}."
     )


diff --git a/llama_cpp/llama_cpp.py b/llama_cpp/llama_cpp.py
index 711d42a..a23c778 100644
--- a/llama_cpp/llama_cpp.py
+++ b/llama_cpp/llama_cpp.py
@@ -3,6 +3,7 @@ from __future__ import annotations
 import os
 import ctypes
 import pathlib
+import sys

 from typing import (
     Callable,
@@ -32,7 +33,19 @@ if TYPE_CHECKING:

 # Specify the base name of the shared library to load
 _lib_base_name = "llama"
+
 _override_base_path = os.environ.get("LLAMA_CPP_LIB_PATH")
+if sys.platform.startswith("win") and _override_base_path is None:
+    # llama.cpp's Windows builds store their DLLs in `$CONDA_PREFIX/Library/bin/`.
+    # We cannot assume that `$CONDA_PREFIX` is set, so we use this file's
+    # position to determine the prefix directory.
+
+    # This file's directory in the prefix: `$CONDA_PREFIX/lib/site-packages/llama_cpp`
+    __this_file_dir = pathlib.Path(os.path.abspath(os.path.dirname(__file__)))
+    # Prefix directory: `$CONDA_PREFIX`
+    __prefix_dir = __this_file_dir.parent.parent.parent
+    _override_base_path = __prefix_dir / "Library" / "bin"
+
 _base_path = pathlib.Path(os.path.abspath(os.path.dirname(__file__))) / "lib" if _override_base_path is None else pathlib.Path(_override_base_path)
 # Load the library
 _lib = load_shared_library(_lib_base_name, _base_path)
--
2.50.1
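
Taken together, the patch makes library resolution two-stage: explicit paths (the packaged lib/ directory, or the conda prefix's Library/bin on Windows) are tried first, and bare file names are appended so that ctypes.CDLL can fall back to the OS loader's regular search path. Below is a minimal Python sketch of that behavior under the same assumptions; the helper names are illustrative, not part of llama-cpp-python's API.

import ctypes
import pathlib
import sys

def windows_dll_dir() -> pathlib.Path:
    # Mirrors the patch's prefix derivation: this module lives at
    # $CONDA_PREFIX/.../site-packages/llama_cpp, so three .parent hops
    # reach the prefix, and conda's Windows DLLs sit under Library/bin.
    here = pathlib.Path(__file__).resolve().parent
    return here.parent.parent.parent / "Library" / "bin"

def candidates(base: pathlib.Path, name: str = "llama"):
    # Path objects are checked for existence; plain strings are handed to
    # the dynamic loader, which applies its own search path (PATH,
    # LD_LIBRARY_PATH, rpath, ...).
    if sys.platform == "win32":
        return [base / f"{name}.dll", base / f"lib{name}.dll",
                f"{name}.dll", f"lib{name}.dll"]
    suffix = "dylib" if sys.platform == "darwin" else "so"
    return [base / f"lib{name}.{suffix}", f"lib{name}.{suffix}"]

def load_first(paths):
    for p in paths:
        if isinstance(p, str) or p.exists():
            try:
                return ctypes.CDLL(str(p))
            except OSError:
                continue  # candidate failed to load; try the next one
    raise FileNotFoundError(f"no shared library found in {paths}")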

7 changes: 7 additions & 0 deletions recipe/bld.bat

@@ -0,0 +1,7 @@
:: Set CMake arguments to use external llama.cpp library
set CMAKE_ARGS=%CMAKE_ARGS% -DLLAMA_BUILD=OFF
set CMAKE_ARGS=%CMAKE_ARGS% -DLLAVA_BUILD=OFF

:: Install the package
%PYTHON% -m pip install . -vv --no-deps --no-build-isolation
if errorlevel 1 exit 1
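
scikit-build-core picks up CMAKE_ARGS from the environment, so the two set lines above are what disable the vendored llama.cpp build on Windows; the resulting package then ships no libllama of its own. As a troubleshooting aid, one way to confirm after installation that the bindings resolved the external DLL is to inspect the module-internal handle created by the patched loader (an internal attribute, so this is a debugging sketch only, not a supported API):

# Troubleshooting sketch: show which shared library the bindings loaded.
# _lib is the internal ctypes.CDLL handle set up in llama_cpp/llama_cpp.py.
import llama_cpp.llama_cpp as bindings

print(bindings._lib)  # e.g. <CDLL '...\Library\bin\llama.dll', handle ...>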
46 changes: 25 additions & 21 deletions recipe/meta.yaml

@@ -1,61 +1,59 @@
 {% set name = "llama-cpp-python" %}
-# NOTE: VERIFY llama_cpp_version before merging!
-{% set version = "0.2.24" %}
-{% set llama_cpp_version = "0.0.1660" %}
+{% set version = "0.3.16" %}
+# Using external llama.cpp b6188 package (version constraint: >=0.0.6188,<0.0.6239)
+# llama-cpp-python 0.3.16 requires llama.cpp with llama_get_kv_self() API,
+# which was removed in b6239 (PR #15472)

 package:
   name: {{ name|lower }}
   version: {{ version }}

 source:
   url: https://pypi.io/packages/source/{{ name[0] }}/{{ name }}/llama_cpp_python-{{ version }}.tar.gz
-  sha256: 85f8fd110b4b90599d5ff427bd4a1a4db6e70817c60ba8aa609fa5c645761ec1
+  sha256: 34ed0f9bd9431af045bb63d9324ae620ad0536653740e9bb163a2e1fcb973be6
   patches:
-    # Asks cdll to look for the library in the path as well.
-    - try-lib-in-path.patch
+    # Adapt shared library relocation for conda environments (find system llama.cpp)
+    - 0001-Adapt-shared-library-relocation.patch

 build:
   number: 0
-  script:
-    {% macro cmake_args(key, value) -%}
-    - export CMAKE_ARGS="${CMAKE_ARGS} {{ key }}={{ value }}"  # [unix]
-    - set CMAKE_ARGS=%CMAKE_ARGS% {{ key }}={{ value }}        # [win]
-    {%- endmacro %}
-
-    {{ cmake_args("-DLLAMA_BUILD", "OFF") }}
-    {{ cmake_args("-DLLAVA_BUILD", "OFF") }}
-
-    - {{ PYTHON }} -m pip install . -vv
+  script:  # [unix]
+    - export CMAKE_ARGS="${CMAKE_ARGS} -DLLAMA_BUILD=OFF"  # [unix]
+    - export CMAKE_ARGS="${CMAKE_ARGS} -DLLAVA_BUILD=OFF"  # [unix]
[Review comment on the -DLLAVA_BUILD line above] Suggested change:
     - export CMAKE_ARGS="${CMAKE_ARGS} -DLLAVA_BUILD=OFF"  # [unix]
+    - set CMAKE_ARGS="%CMAKE_ARGS% -DLLAMA_BUILD=OFF"  # [win]
+    - set CMAKE_ARGS="%CMAKE_ARGS% -DLLAVA_BUILD=OFF"  # [win]
+    - {{ PYTHON }} -m pip install . -vv --no-deps --no-build-isolation  # [unix]

 requirements:
   build:
     - python                              # [build_platform != target_platform]
     - cross-python_{{ target_platform }}  # [build_platform != target_platform]

+    # Compilers needed because scikit-build-core runs CMake which requires C/C++ compiler
     - {{ compiler('c') }}
     - {{ compiler('cxx') }}
     - cmake
-    - make
     - pkgconfig
+    - make  # [not win]

   host:
     - python
     - scikit-build-core >=0.5.1
     - pip
+    - cmake  # Required by scikit-build-core at build time

   run:
     - python
     - typing-extensions >=4.5.0
     - numpy >=1.20.0
     - diskcache >=5.6.1
     - jinja2 >=2.11.3

-    - llama.cpp {{ llama_cpp_version }}
+    # External llama.cpp package (b6188 - last version with llama_get_kv_self API)
+    - llama.cpp >=0.0.6188,<0.0.6239

     # Split into llama-cpp-python-server
     - uvicorn >=0.22.0
     - fastapi >=0.100.0
     - pydantic-settings >=2.0.1
     - sse-starlette >=1.6.1
-    - starlette-context >=0.3.6,<0.4
+    - starlette-context >=0.3.6
     - pyyaml >=5.1

 test:
   imports:
     - llama_cpp
@@ -67,6 +65,12 @@ test:
 about:
   home: https://github.com/abetlen/llama-cpp-python
   summary: Python bindings for the llama.cpp library
+  description: |
+    Python bindings for llama.cpp, providing a simple Python interface for
+    inference with Large Language Models (LLMs) using the llama.cpp backend.
+    Supports CPU and GPU acceleration with an external llama.cpp library.
+  dev_url: https://github.com/abetlen/llama-cpp-python
+  doc_url: https://llama-cpp-python.readthedocs.io
   license: MIT
   license_file:
     - LICENSE.md
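
The run-time pin llama.cpp >=0.0.6188,<0.0.6239 encodes an ABI requirement: the 0.3.16 bindings still call llama_get_kv_self(), which upstream llama.cpp removed in b6239. If in doubt about a given libllama build, the symbol can be probed directly with ctypes; the sketch below assumes the library is on the loader's search path (as in an activated conda environment) and uses platform-typical file names.

import ctypes
import sys

# Probe an installed libllama for the symbol the 0.3.16 bindings need.
# File names follow the usual platform conventions; adjust if yours differ.
libname = {"win32": "llama.dll", "darwin": "libllama.dylib"}.get(sys.platform, "libllama.so")
lib = ctypes.CDLL(libname)  # relies on the loader's search path
# hasattr() on a CDLL triggers a symbol lookup and returns False if absent.
print("llama_get_kv_self:", hasattr(lib, "llama_get_kv_self"))  # False from b6239 on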
85 changes: 0 additions & 85 deletions recipe/try-lib-in-path.patch

This file was deleted.