Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-06-29 12:35:16 +00:00)

Commit: Merge branch 'master' into compilade/imatrix-batched-chunks
@@ -13,6 +13,7 @@ Checks: >
 -readability-magic-numbers,
 -readability-uppercase-literal-suffix,
 -readability-simplify-boolean-expr,
+-readability-math-missing-parentheses,
 clang-analyzer-*,
 -clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling,
 performance-*,
@@ -14,9 +14,9 @@ WORKDIR /app
 COPY . .

 RUN if [ "$TARGETARCH" = "amd64" ]; then \
-        cmake -S . -B build -DCMAKE_BUILD_TYPE=Release -DGGML_NATIVE=OFF -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON; \
+        cmake -S . -B build -DCMAKE_BUILD_TYPE=Release -DGGML_NATIVE=OFF -DLLAMA_BUILD_TESTS=OFF -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON; \
     elif [ "$TARGETARCH" = "arm64" ]; then \
-        cmake -S . -B build -DCMAKE_BUILD_TYPE=Release -DGGML_NATIVE=OFF -DGGML_CPU_ARM_ARCH=${GGML_CPU_ARM_ARCH}; \
+        cmake -S . -B build -DCMAKE_BUILD_TYPE=Release -DGGML_NATIVE=OFF -DLLAMA_BUILD_TESTS=OFF -DGGML_CPU_ARM_ARCH=${GGML_CPU_ARM_ARCH}; \
     else \
         echo "Unsupported architecture"; \
         exit 1; \
@@ -21,7 +21,7 @@ COPY . .
 RUN if [ "${CUDA_DOCKER_ARCH}" != "default" ]; then \
         export CMAKE_ARGS="-DCMAKE_CUDA_ARCHITECTURES=${CUDA_DOCKER_ARCH}"; \
     fi && \
-    cmake -B build -DGGML_NATIVE=OFF -DGGML_CUDA=ON -DLLAMA_CURL=ON -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \
+    cmake -B build -DGGML_NATIVE=OFF -DGGML_CUDA=ON -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON -DLLAMA_BUILD_TESTS=OFF ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \
     cmake --build build --config Release -j$(nproc)

 RUN mkdir -p /app/lib && \
@@ -1,4 +1,4 @@
-ARG ONEAPI_VERSION=2025.0.0-0-devel-ubuntu22.04
+ARG ONEAPI_VERSION=2025.1.1-0-devel-ubuntu24.04

 ## Build Image

@@ -17,7 +17,7 @@ RUN if [ "${GGML_SYCL_F16}" = "ON" ]; then \
         && export OPT_SYCL_F16="-DGGML_SYCL_F16=ON"; \
     fi && \
     echo "Building with dynamic libs" && \
-    cmake -B build -DGGML_NATIVE=OFF -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_CURL=ON -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON ${OPT_SYCL_F16} && \
+    cmake -B build -DGGML_NATIVE=OFF -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON -DLLAMA_BUILD_TESTS=OFF ${OPT_SYCL_F16} && \
     cmake --build build --config Release -j$(nproc)

 RUN mkdir -p /app/lib && \
@@ -49,19 +49,23 @@ COPY --from=build /app/full /app

 WORKDIR /app

-RUN apt-get update \
-    && apt-get install -y \
+RUN apt-get update && \
+    apt-get install -y \
     git \
     python3 \
     python3-pip \
-    && pip install --upgrade pip setuptools wheel \
-    && pip install -r requirements.txt \
-    && apt autoremove -y \
-    && apt clean -y \
-    && rm -rf /tmp/* /var/tmp/* \
-    && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \
-    && find /var/cache -type f -delete
+    python3-venv && \
+    python3 -m venv /opt/venv && \
+    . /opt/venv/bin/activate && \
+    pip install --upgrade pip setuptools wheel && \
+    pip install -r requirements.txt && \
+    apt autoremove -y && \
+    apt clean -y && \
+    rm -rf /tmp/* /var/tmp/* && \
+    find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete && \
+    find /var/cache -type f -delete

+ENV PATH="/opt/venv/bin:$PATH"

 ENTRYPOINT ["/app/tools.sh"]

@@ -22,7 +22,7 @@ ENV LD_LIBRARY_PATH=${ASCEND_TOOLKIT_HOME}/runtime/lib64/stub:$LD_LIBRARY_PATH

 RUN echo "Building with static libs" && \
     source /usr/local/Ascend/ascend-toolkit/set_env.sh --force && \
-    cmake -B build -DGGML_NATIVE=OFF -DGGML_CANN=ON -DBUILD_SHARED_LIBS=OFF && \
+    cmake -B build -DGGML_NATIVE=OFF -DGGML_CANN=ON -DBUILD_SHARED_LIBS=OFF -DLLAMA_BUILD_TESTS=OFF && \
    cmake --build build --config Release --target llama-cli

 # TODO: use image with NNRT
@@ -1,10 +1,10 @@
 ARG UBUNTU_VERSION=22.04
 # This needs to generally match the container host's environment.
-ARG MUSA_VERSION=rc3.1.1
+ARG MUSA_VERSION=rc4.0.1
 # Target the MUSA build image
-ARG BASE_MUSA_DEV_CONTAINER=mthreads/musa:${MUSA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
+ARG BASE_MUSA_DEV_CONTAINER=mthreads/musa:${MUSA_VERSION}-mudnn-devel-ubuntu${UBUNTU_VERSION}

-ARG BASE_MUSA_RUN_CONTAINER=mthreads/musa:${MUSA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}
+ARG BASE_MUSA_RUN_CONTAINER=mthreads/musa:${MUSA_VERSION}-mudnn-runtime-ubuntu${UBUNTU_VERSION}

 FROM ${BASE_MUSA_DEV_CONTAINER} AS build

@@ -21,21 +21,14 @@ RUN apt-get update && \
     libcurl4-openssl-dev \
     libgomp1

-COPY requirements.txt requirements.txt
-COPY requirements requirements
-
-RUN pip install --upgrade pip setuptools wheel \
-    && pip install -r requirements.txt
-
 WORKDIR /app

 COPY . .

-# Use the default MUSA archs if not specified
 RUN if [ "${MUSA_DOCKER_ARCH}" != "default" ]; then \
         export CMAKE_ARGS="-DMUSA_ARCHITECTURES=${MUSA_DOCKER_ARCH}"; \
     fi && \
-    cmake -B build -DGGML_NATIVE=OFF -DGGML_MUSA=ON -DLLAMA_CURL=ON -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \
+    cmake -B build -DGGML_NATIVE=OFF -DGGML_MUSA=ON -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON -DLLAMA_BUILD_TESTS=OFF ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \
     cmake --build build --config Release -j$(nproc)

 RUN mkdir -p /app/lib && \
@@ -40,7 +40,7 @@ WORKDIR /app
 COPY . .

 RUN HIPCXX="$(hipconfig -l)/clang" HIP_PATH="$(hipconfig -R)" \
-    cmake -S . -B build -DGGML_HIP=ON -DAMDGPU_TARGETS=$ROCM_DOCKER_ARCH -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON -DCMAKE_BUILD_TYPE=Release -DLLAMA_CURL=ON \
+    cmake -S . -B build -DGGML_HIP=ON -DAMDGPU_TARGETS=$ROCM_DOCKER_ARCH -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON -DCMAKE_BUILD_TYPE=Release -DLLAMA_BUILD_TESTS=OFF \
     && cmake --build build --config Release -j$(nproc)

 RUN mkdir -p /app/lib \
@@ -16,7 +16,7 @@ WORKDIR /app

 COPY . .

-RUN cmake -B build -DGGML_NATIVE=OFF -DGGML_VULKAN=1 -DLLAMA_CURL=1 -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON && \
+RUN cmake -B build -DGGML_NATIVE=OFF -DGGML_VULKAN=1 -DLLAMA_BUILD_TESTS=OFF -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON && \
     cmake --build build --config Release -j$(nproc)

 RUN mkdir -p /app/lib && \
@@ -21,15 +21,15 @@ indent_style = tab
 [prompts/*.txt]
 insert_final_newline = unset

-[examples/server/public/*]
+[tools/server/public/*]
 indent_size = 2

-[examples/server/public/deps_*]
+[tools/server/public/deps_*]
 trim_trailing_whitespace = unset
 indent_style = unset
 indent_size = unset

-[examples/server/deps_*]
+[tools/server/deps_*]
 trim_trailing_whitespace = unset
 indent_style = unset
 indent_size = unset

@@ -37,7 +37,7 @@ indent_size = unset
 [examples/llama.swiftui/llama.swiftui.xcodeproj/*]
 indent_style = tab

-[examples/cvector-generator/*.txt]
+[tools/cvector-generator/*.txt]
 trim_trailing_whitespace = unset
 insert_final_newline = unset

@@ -48,3 +48,7 @@ end_of_line = unset
 charset = unset
 trim_trailing_whitespace = unset
 insert_final_newline = unset
+
+[vendor/miniaudio/miniaudio.h]
+trim_trailing_whitespace = unset
+insert_final_newline = unset
.flake8 (3 lines changed)

@@ -2,8 +2,9 @@
 max-line-length = 125
 ignore = E203,E211,E221,E225,E231,E241,E251,E261,E266,E501,E701,E704,W503
 exclude =
-    # Do not traverse examples
+    # Do not traverse examples and tools
     examples,
+    tools,
     # Do not include package initializers
     __init__.py,
     # No need to traverse our git directory
.github/actions/get-tag-name/action.yml (new file, 22 lines)

@@ -0,0 +1,22 @@
+name: "Determine tag name"
+description: "Determine the tag name to use for a release"
+outputs:
+  name:
+    description: "The name of the tag"
+    value: ${{ steps.tag.outputs.name }}
+
+runs:
+  using: "composite"
+  steps:
+    - name: Determine tag name
+      id: tag
+      shell: bash
+      run: |
+        BUILD_NUMBER="$(git rev-list --count HEAD)"
+        SHORT_HASH="$(git rev-parse --short=7 HEAD)"
+        if [[ "${{ env.BRANCH_NAME }}" == "master" ]]; then
+          echo "name=b${BUILD_NUMBER}" >> $GITHUB_OUTPUT
+        else
+          SAFE_NAME=$(echo "${{ env.BRANCH_NAME }}" | tr '/' '-')
+          echo "name=${SAFE_NAME}-b${BUILD_NUMBER}-${SHORT_HASH}" >> $GITHUB_OUTPUT
+        fi
.github/actions/windows-setup-cuda/action.yml (new file, 67 lines)

@@ -0,0 +1,67 @@
+name: "Windows - Setup CUDA Toolkit"
+description: "Setup CUDA Toolkit for Windows"
+inputs:
+  cuda_version:
+    description: "CUDA toolkit version"
+    required: true
+
+runs:
+  using: "composite"
+  steps:
+    - name: Install Cuda Toolkit 11.7
+      if: ${{ inputs.cuda_version == '11.7' }}
+      shell: pwsh
+      run: |
+        mkdir -p "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7"
+        choco install unzip -y
+        curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_cudart/windows-x86_64/cuda_cudart-windows-x86_64-11.7.99-archive.zip"
+        curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvcc/windows-x86_64/cuda_nvcc-windows-x86_64-11.7.99-archive.zip"
+        curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvrtc/windows-x86_64/cuda_nvrtc-windows-x86_64-11.7.99-archive.zip"
+        curl -O "https://developer.download.nvidia.com/compute/cuda/redist/libcublas/windows-x86_64/libcublas-windows-x86_64-11.7.4.6-archive.zip"
+        curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvtx/windows-x86_64/cuda_nvtx-windows-x86_64-11.7.91-archive.zip"
+        curl -O "https://developer.download.nvidia.com/compute/cuda/redist/visual_studio_integration/windows-x86_64/visual_studio_integration-windows-x86_64-11.7.91-archive.zip"
+        curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvprof/windows-x86_64/cuda_nvprof-windows-x86_64-11.7.101-archive.zip"
+        curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_cccl/windows-x86_64/cuda_cccl-windows-x86_64-11.7.91-archive.zip"
+        unzip '*.zip' -d "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7"
+        xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\cuda_cudart-windows-x86_64-11.7.99-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" /E /I /H /Y
+        xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\cuda_nvcc-windows-x86_64-11.7.99-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" /E /I /H /Y
+        xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\cuda_nvrtc-windows-x86_64-11.7.99-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" /E /I /H /Y
+        xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\libcublas-windows-x86_64-11.7.4.6-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" /E /I /H /Y
+        xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\cuda_nvtx-windows-x86_64-11.7.91-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" /E /I /H /Y
+        xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\visual_studio_integration-windows-x86_64-11.7.91-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" /E /I /H /Y
+        xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\cuda_nvprof-windows-x86_64-11.7.101-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" /E /I /H /Y
+        xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\cuda_cccl-windows-x86_64-11.7.91-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" /E /I /H /Y
+        echo "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
+        echo "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\libnvvp" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
+        echo "CUDA_PATH=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8
+        echo "CUDA_PATH_V11_7=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8
+
+    - name: Install Cuda Toolkit 12.4
+      if: ${{ inputs.cuda_version == '12.4' }}
+      shell: pwsh
+      run: |
+        mkdir -p "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4"
+        choco install unzip -y
+        curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_cudart/windows-x86_64/cuda_cudart-windows-x86_64-12.4.127-archive.zip"
+        curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvcc/windows-x86_64/cuda_nvcc-windows-x86_64-12.4.131-archive.zip"
+        curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvrtc/windows-x86_64/cuda_nvrtc-windows-x86_64-12.4.127-archive.zip"
+        curl -O "https://developer.download.nvidia.com/compute/cuda/redist/libcublas/windows-x86_64/libcublas-windows-x86_64-12.4.5.8-archive.zip"
+        curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvtx/windows-x86_64/cuda_nvtx-windows-x86_64-12.4.127-archive.zip"
+        curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_profiler_api/windows-x86_64/cuda_profiler_api-windows-x86_64-12.4.127-archive.zip"
+        curl -O "https://developer.download.nvidia.com/compute/cuda/redist/visual_studio_integration/windows-x86_64/visual_studio_integration-windows-x86_64-12.4.127-archive.zip"
+        curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvprof/windows-x86_64/cuda_nvprof-windows-x86_64-12.4.127-archive.zip"
+        curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_cccl/windows-x86_64/cuda_cccl-windows-x86_64-12.4.127-archive.zip"
+        unzip '*.zip' -d "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4"
+        xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\cuda_cudart-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y
+        xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\cuda_nvcc-windows-x86_64-12.4.131-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y
+        xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\cuda_nvrtc-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y
+        xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\libcublas-windows-x86_64-12.4.5.8-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y
+        xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\cuda_nvtx-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y
+        xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\cuda_profiler_api-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y
+        xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\visual_studio_integration-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y
+        xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\cuda_nvprof-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y
+        xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\cuda_cccl-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y
+        echo "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
+        echo "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\libnvvp" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
+        echo "CUDA_PATH=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8
+        echo "CUDA_PATH_V12_4=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8
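Note: a minimal sketch of how a Windows build job might invoke this composite action. The job name, runner, and matrix layout are assumptions; only the action path and its cuda_version input (with the two supported values) come from the file above.

    # Hypothetical caller; the real call sites live in build.yml, whose diff is not shown here.
    jobs:
      windows-cuda:
        runs-on: windows-2022
        strategy:
          matrix:
            cuda: ['11.7', '12.4']   # the two versions the action knows how to install
        steps:
          - uses: actions/checkout@v4
          - name: Install CUDA toolkit
            uses: ./.github/actions/windows-setup-cuda
            with:
              cuda_version: ${{ matrix.cuda }}
          # After this step the action has appended the toolkit's bin directory to GITHUB_PATH
          # and exported CUDA_PATH / CUDA_PATH_V<ver> via GITHUB_ENV.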
@@ -5,6 +5,10 @@ inputs:
     description: 'CURL version'
     required: false
     default: '8.6.0_6'
+  architecture:
+    description: 'Architecture of the libcurl to download'
+    required: false
+    default: 'win64'
 outputs:
   curl_path:
     description: "Path to the downloaded libcurl"

@@ -18,8 +22,9 @@ runs:
       shell: powershell
       env:
         CURL_VERSION: ${{ inputs.curl_version }}
+        ARCHITECTURE: ${{ inputs.architecture }}
       run: |
-        curl.exe -o $env:RUNNER_TEMP/curl.zip -L "https://curl.se/windows/dl-${env:CURL_VERSION}/curl-${env:CURL_VERSION}-win64-mingw.zip"
+        curl.exe -o $env:RUNNER_TEMP/curl.zip -L "https://curl.se/windows/dl-${env:CURL_VERSION}/curl-${env:CURL_VERSION}-${env:ARCHITECTURE}-mingw.zip"
        mkdir $env:RUNNER_TEMP/libcurl
        tar.exe -xvf $env:RUNNER_TEMP/curl.zip --strip-components=1 -C $env:RUNNER_TEMP/libcurl
        echo "curl_path=$env:RUNNER_TEMP/libcurl" >> $env:GITHUB_OUTPUT
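Note: the new architecture input is what lets the arm64 Windows builds fetch the win64a libcurl package instead of win64. A sketch of the call site, taken from the windows-cpu job in release.yml further below:

    - name: libCURL
      id: get_libcurl
      uses: ./.github/actions/windows-setup-curl
      with:
        architecture: ${{ matrix.arch == 'x64' && 'win64' || 'win64a' }}

    # The downloaded path is then handed to CMake via the step output, e.g.
    #   -DCURL_LIBRARY="%CURL_PATH%/lib/libcurl.dll.a" -DCURL_INCLUDE_DIR="%CURL_PATH%/include"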
.github/labeler.yml (13 lines changed)

@@ -45,7 +45,9 @@ build:
     - CMakePresets.json
 examples:
   - changed-files:
-      - any-glob-to-any-file: examples/**
+      - any-glob-to-any-file:
+          - examples/**
+          - tools/**
 devops:
   - changed-files:
       - any-glob-to-any-file:

@@ -70,7 +72,7 @@ android:
 server:
   - changed-files:
       - any-glob-to-any-file:
-          - examples/server/**
+          - tools/server/**
 ggml:
   - changed-files:
       - any-glob-to-any-file:

@@ -84,3 +86,10 @@ nix:
 embedding:
   - changed-files:
       - any-glob-to-any-file: examples/embedding/
+
+Ascend NPU:
+  - changed-files:
+      - any-glob-to-any-file:
+          - ggml/include/ggml-cann.h
+          - ggml/src/ggml-cann/**
+          - docs/backend/CANN.md
.github/workflows/bench.yml.disabled (30 lines changed)

@@ -27,10 +27,10 @@ on:
   push:
     branches:
       - master
-    paths: ['llama.cpp', 'ggml.c', 'ggml-backend.cpp', 'ggml-quants.c', '**/*.cu', 'examples/server/*.h*', 'examples/server/*.cpp']
+    paths: ['llama.cpp', 'ggml.c', 'ggml-backend.cpp', 'ggml-quants.c', '**/*.cu', 'tools/server/*.h*', 'tools/server/*.cpp']
   pull_request_target:
     types: [opened, synchronize, reopened]
-    paths: ['llama.cpp', 'ggml.c', 'ggml-backend.cpp', 'ggml-quants.c', '**/*.cu', 'examples/server/*.h*', 'examples/server/*.cpp']
+    paths: ['llama.cpp', 'ggml.c', 'ggml-backend.cpp', 'ggml-quants.c', '**/*.cu', 'tools/server/*.h*', 'tools/server/*.cpp']
   schedule:
     - cron: '04 2 * * *'

@@ -69,7 +69,7 @@ jobs:
       - name: Install python env
        id: pipenv
        run: |
-          cd examples/server/bench
+          cd tools/server/bench
          python3 -m venv venv
          source venv/bin/activate
          pip install -r requirements.txt

@@ -79,7 +79,7 @@ jobs:
        run: |
          wget --quiet https://github.com/prometheus/prometheus/releases/download/v2.51.0/prometheus-2.51.0.linux-amd64.tar.gz
          tar xzf prometheus*.tar.gz --strip-components=1
-          ./prometheus --config.file=examples/server/bench/prometheus.yml &
+          ./prometheus --config.file=tools/server/bench/prometheus.yml &
          while ! nc -z localhost 9090; do
            sleep 0.1
          done

@@ -92,7 +92,7 @@ jobs:
       - name: Install k6 and xk6-sse
        id: k6_installation
        run: |
-          cd examples/server/bench
+          cd tools/server/bench
          go install go.k6.io/xk6/cmd/xk6@latest
          xk6 build master \
            --with github.com/phymbert/xk6-sse

@@ -116,7 +116,7 @@ jobs:
       - name: Download the dataset
        id: download_dataset
        run: |
-          cd examples/server/bench
+          cd tools/server/bench
          wget --quiet https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json

       - name: Server bench

@@ -126,7 +126,7 @@ jobs:
        run: |
          set -eux

-          cd examples/server/bench
+          cd tools/server/bench
          source venv/bin/activate
          python bench.py \
            --runner-label ${{ env.RUNNER_LABEL }} \

@@ -157,9 +157,9 @@ jobs:
          name: bench-server-${{ github.job }}-${{ env.RUNNER_LABEL }}-${{ matrix.model }}-${{ matrix.ftype }}
          compression-level: 9
          path: |
-            examples/server/bench/*.jpg
-            examples/server/bench/*.json
-            examples/server/bench/*.log
+            tools/server/bench/*.jpg
+            tools/server/bench/*.json
+            tools/server/bench/*.log

       - name: Commit status
        uses: Sibz/github-status-action@v1

@@ -178,17 +178,17 @@ jobs:
        with:
          client_id: ${{secrets.IMGUR_CLIENT_ID}}
          path: |
-            examples/server/bench/prompt_tokens_seconds.jpg
-            examples/server/bench/predicted_tokens_seconds.jpg
-            examples/server/bench/kv_cache_usage_ratio.jpg
-            examples/server/bench/requests_processing.jpg
+            tools/server/bench/prompt_tokens_seconds.jpg
+            tools/server/bench/predicted_tokens_seconds.jpg
+            tools/server/bench/kv_cache_usage_ratio.jpg
+            tools/server/bench/requests_processing.jpg

       - name: Extract mermaid
        id: set_mermaid
        run: |
          set -eux

-          cd examples/server/bench
+          cd tools/server/bench
          PROMPT_TOKENS_SECONDS=$(cat prompt_tokens_seconds.mermaid)
          echo "PROMPT_TOKENS_SECONDS<<EOF" >> $GITHUB_ENV
          echo "$PROMPT_TOKENS_SECONDS" >> $GITHUB_ENV
.github/workflows/build-linux-cross.yml (288 lines changed)

@@ -4,29 +4,37 @@ on:
   workflow_call:

 jobs:
-  ubuntu-latest-riscv64-cpu-cross:
-    runs-on: ubuntu-latest
+  ubuntu-24-riscv64-cpu-cross:
+    runs-on: ubuntu-24.04

     steps:
       - uses: actions/checkout@v4
       - name: Setup Riscv
         run: |
           sudo dpkg --add-architecture riscv64
-          sudo sed -i 's|http://azure.archive.ubuntu.com/ubuntu|http://ports.ubuntu.com/ubuntu-ports|g' \
-              /etc/apt/sources.list /etc/apt/apt-mirrors.txt
-          sudo apt-get clean
-          sudo apt-get update
+
+          # Add arch-specific repositories for non-amd64 architectures
+          cat << EOF | sudo tee /etc/apt/sources.list.d/riscv64-ports.list
+          deb [arch=riscv64] http://ports.ubuntu.com/ubuntu-ports/ noble main universe
+          deb [arch=riscv64] http://ports.ubuntu.com/ubuntu-ports/ noble-updates main universe
+          deb [arch=riscv64] http://ports.ubuntu.com/ubuntu-ports/ noble-security main universe
+          deb [arch=riscv64] http://ports.ubuntu.com/ubuntu-ports/ noble-backports main universe
+          EOF
+
+          sudo apt-get update || true ;# Prevent failure due to missing URLs.
+
           sudo apt-get install -y --no-install-recommends \
             build-essential \
             gcc-14-riscv64-linux-gnu \
-            g++-14-riscv64-linux-gnu \
-            libcurl4-openssl-dev:riscv64
+            g++-14-riscv64-linux-gnu

       - name: Build
         run: |
-          cmake -B build -DCMAKE_BUILD_TYPE=Release \
+          cmake -B build -DLLAMA_CURL=OFF \
+            -DCMAKE_BUILD_TYPE=Release \
             -DGGML_OPENMP=OFF \
             -DLLAMA_BUILD_EXAMPLES=ON \
+            -DLLAMA_BUILD_TOOLS=ON \
             -DLLAMA_BUILD_TESTS=OFF \
             -DCMAKE_SYSTEM_NAME=Linux \
             -DCMAKE_SYSTEM_PROCESSOR=riscv64 \

@@ -40,35 +48,40 @@ jobs:

           cmake --build build --config Release -j $(nproc)

-  ubuntu-latest-riscv64-vulkan-cross:
-    runs-on: ubuntu-latest
+  ubuntu-24-riscv64-vulkan-cross:
+    runs-on: ubuntu-24.04

     steps:
       - uses: actions/checkout@v4
-        with:
-          fetch-depth: 0

       - name: Setup Riscv
         run: |
           sudo dpkg --add-architecture riscv64
-          sudo sed -i 's|http://azure.archive.ubuntu.com/ubuntu|http://ports.ubuntu.com/ubuntu-ports|g' \
-              /etc/apt/sources.list /etc/apt/apt-mirrors.txt
-          sudo apt-get clean
-          sudo apt-get update
+
+          # Add arch-specific repositories for non-amd64 architectures
+          cat << EOF | sudo tee /etc/apt/sources.list.d/riscv64-ports.list
+          deb [arch=riscv64] http://ports.ubuntu.com/ubuntu-ports/ noble main universe
+          deb [arch=riscv64] http://ports.ubuntu.com/ubuntu-ports/ noble-updates main universe
+          deb [arch=riscv64] http://ports.ubuntu.com/ubuntu-ports/ noble-security main universe
+          deb [arch=riscv64] http://ports.ubuntu.com/ubuntu-ports/ noble-backports main universe
+          EOF
+
+          sudo apt-get update || true ;# Prevent failure due to missing URLs.
+
           sudo apt-get install -y --no-install-recommends \
             build-essential \
             glslc \
             gcc-14-riscv64-linux-gnu \
             g++-14-riscv64-linux-gnu \
-            libvulkan-dev:riscv64 \
-            libcurl4-openssl-dev:riscv64
+            libvulkan-dev:riscv64

       - name: Build
         run: |
-          cmake -B build -DCMAKE_BUILD_TYPE=Release \
+          cmake -B build -DLLAMA_CURL=OFF \
+            -DCMAKE_BUILD_TYPE=Release \
             -DGGML_VULKAN=ON \
             -DGGML_OPENMP=OFF \
             -DLLAMA_BUILD_EXAMPLES=ON \
+            -DLLAMA_BUILD_TOOLS=ON \
             -DLLAMA_BUILD_TESTS=OFF \
             -DCMAKE_SYSTEM_NAME=Linux \
             -DCMAKE_SYSTEM_PROCESSOR=riscv64 \

@@ -82,34 +95,39 @@ jobs:

           cmake --build build --config Release -j $(nproc)

-  ubuntu-latest-arm64-vulkan-cross:
-    runs-on: ubuntu-latest
+  ubuntu-24-arm64-vulkan-cross:
+    runs-on: ubuntu-24.04

     steps:
       - uses: actions/checkout@v4
-        with:
-          fetch-depth: 0

       - name: Setup Arm64
         run: |
           sudo dpkg --add-architecture arm64
-          sudo sed -i 's|http://azure.archive.ubuntu.com/ubuntu|http://ports.ubuntu.com/ubuntu-ports|g' \
-              /etc/apt/sources.list /etc/apt/apt-mirrors.txt
-          sudo apt-get clean
-          sudo apt-get update
+
+          # Add arch-specific repositories for non-amd64 architectures
+          cat << EOF | sudo tee /etc/apt/sources.list.d/arm64-ports.list
+          deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports/ noble main universe
+          deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports/ noble-updates main universe
+          deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports/ noble-security main universe
+          deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports/ noble-backports main universe
+          EOF
+
+          sudo apt-get update || true ;# Prevent failure due to missing URLs.
+
           sudo apt-get install -y --no-install-recommends \
             build-essential \
             glslc \
             crossbuild-essential-arm64 \
-            libvulkan-dev:arm64 \
-            libcurl4-openssl-dev:arm64
+            libvulkan-dev:arm64

       - name: Build
         run: |
-          cmake -B build -DCMAKE_BUILD_TYPE=Release \
+          cmake -B build -DLLAMA_CURL=OFF \
+            -DCMAKE_BUILD_TYPE=Release \
             -DGGML_VULKAN=ON \
             -DGGML_OPENMP=OFF \
             -DLLAMA_BUILD_EXAMPLES=ON \
+            -DLLAMA_BUILD_TOOLS=ON \
             -DLLAMA_BUILD_TESTS=OFF \
             -DCMAKE_SYSTEM_NAME=Linux \
             -DCMAKE_SYSTEM_PROCESSOR=aarch64 \

@@ -122,3 +140,207 @@ jobs:
             -DCMAKE_FIND_ROOT_PATH_MODE_INCLUDE=BOTH

           cmake --build build --config Release -j $(nproc)
+
+  ubuntu-24-ppc64el-cpu-cross:
+    runs-on: ubuntu-24.04
+
+    steps:
+      - uses: actions/checkout@v4
+      - name: Setup PowerPC64le
+        run: |
+          sudo dpkg --add-architecture ppc64el
+
+          # Add arch-specific repositories for non-amd64 architectures
+          cat << EOF | sudo tee /etc/apt/sources.list.d/ppc64el-ports.list
+          deb [arch=ppc64el] http://ports.ubuntu.com/ubuntu-ports/ noble main universe
+          deb [arch=ppc64el] http://ports.ubuntu.com/ubuntu-ports/ noble-updates main universe
+          deb [arch=ppc64el] http://ports.ubuntu.com/ubuntu-ports/ noble-security main universe
+          deb [arch=ppc64el] http://ports.ubuntu.com/ubuntu-ports/ noble-backports main universe
+          EOF
+
+          sudo apt-get update || true ;# Prevent failure due to missing URLs.
+
+          sudo apt-get install -y --no-install-recommends \
+            build-essential \
+            gcc-14-powerpc64le-linux-gnu \
+            g++-14-powerpc64le-linux-gnu
+
+      - name: Build
+        run: |
+          cmake -B build -DLLAMA_CURL=OFF \
+            -DCMAKE_BUILD_TYPE=Release \
+            -DGGML_OPENMP=OFF \
+            -DLLAMA_BUILD_EXAMPLES=ON \
+            -DLLAMA_BUILD_TOOLS=ON \
+            -DLLAMA_BUILD_TESTS=OFF \
+            -DCMAKE_SYSTEM_NAME=Linux \
+            -DCMAKE_SYSTEM_PROCESSOR=ppc64 \
+            -DCMAKE_C_COMPILER=powerpc64le-linux-gnu-gcc-14 \
+            -DCMAKE_CXX_COMPILER=powerpc64le-linux-gnu-g++-14 \
+            -DCMAKE_POSITION_INDEPENDENT_CODE=ON \
+            -DCMAKE_FIND_ROOT_PATH=/usr/lib/powerpc64le-linux-gnu \
+            -DCMAKE_FIND_ROOT_PATH_MODE_PROGRAM=NEVER \
+            -DCMAKE_FIND_ROOT_PATH_MODE_LIBRARY=ONLY \
+            -DCMAKE_FIND_ROOT_PATH_MODE_INCLUDE=BOTH
+
+          cmake --build build --config Release -j $(nproc)
+
+  ubuntu-24-ppc64el-vulkan-cross:
+    runs-on: ubuntu-24.04
+
+    steps:
+      - uses: actions/checkout@v4
+      - name: Setup PowerPC64le
+        run: |
+          sudo dpkg --add-architecture ppc64el
+
+          # Add arch-specific repositories for non-amd64 architectures
+          cat << EOF | sudo tee /etc/apt/sources.list.d/ppc64el-ports.list
+          deb [arch=ppc64el] http://ports.ubuntu.com/ubuntu-ports/ noble main universe
+          deb [arch=ppc64el] http://ports.ubuntu.com/ubuntu-ports/ noble-updates main universe
+          deb [arch=ppc64el] http://ports.ubuntu.com/ubuntu-ports/ noble-security main universe
+          deb [arch=ppc64el] http://ports.ubuntu.com/ubuntu-ports/ noble-backports main universe
+          EOF
+
+          sudo apt-get update || true ;# Prevent failure due to missing URLs.
+
+          sudo apt-get install -y --no-install-recommends \
+            build-essential \
+            glslc \
+            gcc-14-powerpc64le-linux-gnu \
+            g++-14-powerpc64le-linux-gnu \
+            libvulkan-dev:ppc64el
+
+      - name: Build
+        run: |
+          cmake -B build -DLLAMA_CURL=OFF \
+            -DCMAKE_BUILD_TYPE=Release \
+            -DGGML_VULKAN=ON \
+            -DGGML_OPENMP=OFF \
+            -DLLAMA_BUILD_EXAMPLES=ON \
+            -DLLAMA_BUILD_TOOLS=ON \
+            -DLLAMA_BUILD_TESTS=OFF \
+            -DCMAKE_SYSTEM_NAME=Linux \
+            -DCMAKE_SYSTEM_PROCESSOR=ppc64 \
+            -DCMAKE_C_COMPILER=powerpc64le-linux-gnu-gcc-14 \
+            -DCMAKE_CXX_COMPILER=powerpc64le-linux-gnu-g++-14 \
+            -DCMAKE_POSITION_INDEPENDENT_CODE=ON \
+            -DCMAKE_FIND_ROOT_PATH=/usr/lib/powerpc64le-linux-gnu \
+            -DCMAKE_FIND_ROOT_PATH_MODE_PROGRAM=NEVER \
+            -DCMAKE_FIND_ROOT_PATH_MODE_LIBRARY=ONLY \
+            -DCMAKE_FIND_ROOT_PATH_MODE_INCLUDE=BOTH
+
+          cmake --build build --config Release -j $(nproc)
+
+  debian-13-loongarch64-cpu-cross:
+    runs-on: ubuntu-24.04
+    container: debian@sha256:653dfb9f86c3782e8369d5f7d29bb8faba1f4bff9025db46e807fa4c22903671
+
+    steps:
+      - uses: actions/checkout@v4
+      - name: Setup LoongArch
+        run: |
+          rm -f /etc/apt/sources.list.d/*
+          cat << EOF | tee /etc/apt/sources.list.d/debian-ports.list
+          deb http://snapshot.debian.org/archive/debian/20250515T202920Z/ trixie main
+          EOF
+          ( echo 'quiet "true";'; \
+            echo 'APT::Get::Assume-Yes "true";'; \
+            echo 'APT::Install-Recommends "false";'; \
+            echo 'Acquire::Check-Valid-Until "false";'; \
+            echo 'Acquire::Retries "5";'; \
+          ) > /etc/apt/apt.conf.d/99snapshot-repos
+
+          apt-get update
+          apt-get install -y ca-certificates debian-ports-archive-keyring cmake git zip
+          dpkg --add-architecture loong64
+
+          # Add arch-specific repositories for non-amd64 architectures
+          cat << EOF | tee /etc/apt/sources.list.d/loong64-ports.list
+          deb [arch=loong64] http://snapshot.debian.org/archive/debian-ports/20250515T194251Z/ sid main
+          EOF
+
+          apt-get update || true ;# Prevent failure due to missing URLs.
+
+          apt-get install -y --no-install-recommends \
+            build-essential \
+            gcc-14-loongarch64-linux-gnu \
+            g++-14-loongarch64-linux-gnu
+
+      - name: Build
+        run: |
+          cmake -B build -DLLAMA_CURL=OFF \
+            -DCMAKE_BUILD_TYPE=Release \
+            -DGGML_OPENMP=OFF \
+            -DLLAMA_BUILD_EXAMPLES=ON \
+            -DLLAMA_BUILD_TOOLS=ON \
+            -DLLAMA_BUILD_TESTS=OFF \
+            -DCMAKE_SYSTEM_NAME=Linux \
+            -DCMAKE_SYSTEM_PROCESSOR=loongarch64 \
+            -DCMAKE_C_COMPILER=loongarch64-linux-gnu-gcc-14 \
+            -DCMAKE_CXX_COMPILER=loongarch64-linux-gnu-g++-14 \
+            -DCMAKE_POSITION_INDEPENDENT_CODE=ON \
+            -DCMAKE_FIND_ROOT_PATH=/usr/lib/loongarch64-linux-gnu \
+            -DCMAKE_FIND_ROOT_PATH_MODE_PROGRAM=NEVER \
+            -DCMAKE_FIND_ROOT_PATH_MODE_LIBRARY=ONLY \
+            -DCMAKE_FIND_ROOT_PATH_MODE_INCLUDE=BOTH
+
+          cmake --build build --config Release -j $(nproc)
+
+  debian-13-loongarch64-vulkan-cross:
+    runs-on: ubuntu-24.04
+    container: debian@sha256:653dfb9f86c3782e8369d5f7d29bb8faba1f4bff9025db46e807fa4c22903671
+
+    steps:
+      - uses: actions/checkout@v4
+      - name: Setup LoongArch
+        run: |
+          rm -f /etc/apt/sources.list.d/*
+          cat << EOF | tee /etc/apt/sources.list.d/debian-ports.list
+          deb http://snapshot.debian.org/archive/debian/20250515T202920Z/ trixie main
+          EOF
+          ( echo 'quiet "true";'; \
+            echo 'APT::Get::Assume-Yes "true";'; \
+            echo 'APT::Install-Recommends "false";'; \
+            echo 'Acquire::Check-Valid-Until "false";'; \
+            echo 'Acquire::Retries "5";'; \
+          ) > /etc/apt/apt.conf.d/99snapshot-repos
+
+          apt-get update
+          apt-get install -y ca-certificates debian-ports-archive-keyring cmake git zip
+          dpkg --add-architecture loong64
+
+          # Add arch-specific repositories for non-amd64 architectures
+          cat << EOF | tee /etc/apt/sources.list.d/loong64-ports.list
+          deb [arch=loong64] http://snapshot.debian.org/archive/debian-ports/20250515T194251Z/ sid main
+          EOF
+
+          apt-get update || true ;# Prevent failure due to missing URLs.
+
+          apt-get install -y --no-install-recommends \
+            build-essential \
+            glslc \
+            gcc-14-loongarch64-linux-gnu \
+            g++-14-loongarch64-linux-gnu \
+            libvulkan-dev:loong64
+
+      - name: Build
+        run: |
+          cmake -B build -DLLAMA_CURL=OFF \
+            -DCMAKE_BUILD_TYPE=Release \
+            -DGGML_VULKAN=ON \
+            -DGGML_OPENMP=OFF \
+            -DLLAMA_BUILD_EXAMPLES=ON \
+            -DLLAMA_BUILD_TOOLS=ON \
+            -DLLAMA_BUILD_TESTS=OFF \
+            -DCMAKE_SYSTEM_NAME=Linux \
+            -DCMAKE_SYSTEM_PROCESSOR=loongarch64 \
+            -DCMAKE_C_COMPILER=loongarch64-linux-gnu-gcc-14 \
+            -DCMAKE_CXX_COMPILER=loongarch64-linux-gnu-g++-14 \
+            -DCMAKE_POSITION_INDEPENDENT_CODE=ON \
+            -DCMAKE_FIND_ROOT_PATH=/usr/lib/loongarch64-linux-gnu \
+            -DCMAKE_FIND_ROOT_PATH_MODE_PROGRAM=NEVER \
+            -DCMAKE_FIND_ROOT_PATH_MODE_LIBRARY=ONLY \
+            -DCMAKE_FIND_ROOT_PATH_MODE_INCLUDE=BOTH
+
+          cmake --build build --config Release -j $(nproc)
.github/workflows/build.yml (817 lines changed; diff too large to display)
.github/workflows/docker.yml (7 lines changed)

@@ -36,10 +36,13 @@ jobs:
       matrix:
         config:
           # Multi-stage build
-          - { tag: "cpu", dockerfile: ".devops/cpu.Dockerfile", platforms: "linux/amd64,linux/arm64", full: true, light: true, server: true, free_disk_space: false }
+          # Note: the arm64 images are failing, which prevents the amd64 images from being built
+          # https://github.com/ggml-org/llama.cpp/issues/11888
+          #- { tag: "cpu", dockerfile: ".devops/cpu.Dockerfile", platforms: "linux/amd64,linux/arm64", full: true, light: true, server: true, free_disk_space: false }
+          - { tag: "cpu", dockerfile: ".devops/cpu.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, free_disk_space: false }
           - { tag: "cuda", dockerfile: ".devops/cuda.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, free_disk_space: false }
           - { tag: "musa", dockerfile: ".devops/musa.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, free_disk_space: true }
-          - { tag: "intel", dockerfile: ".devops/intel.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, free_disk_space: false }
+          - { tag: "intel", dockerfile: ".devops/intel.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, free_disk_space: true }
           - { tag: "vulkan", dockerfile: ".devops/vulkan.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, free_disk_space: false }
           # Note: the rocm images are failing due to a compiler error and are disabled until this is fixed to allow the workflow to complete
           #- {tag: "rocm", dockerfile: ".devops/rocm.Dockerfile", platforms: "linux/amd64,linux/arm64", full: true, light: true, server: true, free_disk_space: true }
749
.github/workflows/release.yml
vendored
Normal file
749
.github/workflows/release.yml
vendored
Normal file
@ -0,0 +1,749 @@
|
|||||||
|
name: Release
|
||||||
|
|
||||||
|
on:
|
||||||
|
workflow_dispatch: # allows manual triggering
|
||||||
|
inputs:
|
||||||
|
create_release:
|
||||||
|
description: 'Create new release'
|
||||||
|
required: true
|
||||||
|
type: boolean
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
paths: ['.github/workflows/release.yml', '**/CMakeLists.txt', '**/.cmake', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu', '**/*.cuh', '**/*.swift', '**/*.m', '**/*.metal', '**/*.comp']
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
|
env:
|
||||||
|
BRANCH_NAME: ${{ github.head_ref || github.ref_name }}
|
||||||
|
CMAKE_ARGS: "-DLLAMA_BUILD_EXAMPLES=OFF -DLLAMA_BUILD_TESTS=OFF -DLLAMA_BUILD_TOOLS=ON -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON"
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
macOS-arm64:
|
||||||
|
runs-on: macos-14
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Clone
|
||||||
|
id: checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: ccache
|
||||||
|
uses: hendrikmuhs/ccache-action@v1.2.16
|
||||||
|
with:
|
||||||
|
key: macOS-latest-cmake-arm64
|
||||||
|
evict-old-files: 1d
|
||||||
|
|
||||||
|
- name: Dependencies
|
||||||
|
id: depends
|
||||||
|
continue-on-error: true
|
||||||
|
run: |
|
||||||
|
brew update
|
||||||
|
brew install curl
|
||||||
|
|
||||||
|
- name: Build
|
||||||
|
id: cmake_build
|
||||||
|
run: |
|
||||||
|
sysctl -a
|
||||||
|
cmake -B build \
|
||||||
|
-DCMAKE_BUILD_RPATH="@loader_path" \
|
||||||
|
-DLLAMA_FATAL_WARNINGS=ON \
|
||||||
|
-DGGML_METAL_USE_BF16=ON \
|
||||||
|
-DGGML_METAL_EMBED_LIBRARY=ON \
|
||||||
|
-DGGML_RPC=ON \
|
||||||
|
${{ env.CMAKE_ARGS }}
|
||||||
|
cmake --build build --config Release -j $(sysctl -n hw.logicalcpu)
|
||||||
|
|
||||||
|
- name: Determine tag name
|
||||||
|
id: tag
|
||||||
|
uses: ./.github/actions/get-tag-name
|
||||||
|
|
||||||
|
- name: Pack artifacts
|
||||||
|
id: pack_artifacts
|
||||||
|
run: |
|
||||||
|
cp LICENSE ./build/bin/
|
||||||
|
zip -r llama-${{ steps.tag.outputs.name }}-bin-macos-arm64.zip ./build/bin/*
|
||||||
|
|
||||||
|
- name: Upload artifacts
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
path: llama-${{ steps.tag.outputs.name }}-bin-macos-arm64.zip
|
||||||
|
name: llama-bin-macos-arm64.zip
|
||||||
|
|
||||||
|
macOS-x64:
|
||||||
|
runs-on: macos-13
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Clone
|
||||||
|
id: checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: ccache
|
||||||
|
uses: hendrikmuhs/ccache-action@v1.2.16
|
||||||
|
with:
|
||||||
|
key: macOS-latest-cmake-x64
|
||||||
|
evict-old-files: 1d
|
||||||
|
|
||||||
|
- name: Dependencies
|
||||||
|
id: depends
|
||||||
|
continue-on-error: true
|
||||||
|
run: |
|
||||||
|
brew update
|
||||||
|
brew install curl
|
||||||
|
|
||||||
|
- name: Build
|
||||||
|
id: cmake_build
|
||||||
|
run: |
|
||||||
|
sysctl -a
|
||||||
|
# Metal is disabled due to intermittent failures with Github runners not having a GPU:
|
||||||
|
# https://github.com/ggml-org/llama.cpp/actions/runs/8635935781/job/23674807267#step:5:2313
|
||||||
|
cmake -B build \
|
||||||
|
-DCMAKE_BUILD_RPATH="@loader_path" \
|
||||||
|
-DLLAMA_FATAL_WARNINGS=ON \
|
||||||
|
-DGGML_METAL=OFF \
|
||||||
|
-DGGML_RPC=ON
|
||||||
|
cmake --build build --config Release -j $(sysctl -n hw.logicalcpu)
|
||||||
|
|
||||||
|
- name: Determine tag name
|
||||||
|
id: tag
|
||||||
|
uses: ./.github/actions/get-tag-name
|
||||||
|
|
||||||
|
- name: Pack artifacts
|
||||||
|
id: pack_artifacts
|
||||||
|
run: |
|
||||||
|
cp LICENSE ./build/bin/
|
||||||
|
zip -r llama-${{ steps.tag.outputs.name }}-bin-macos-x64.zip ./build/bin/*
|
||||||
|
|
||||||
|
- name: Upload artifacts
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
path: llama-${{ steps.tag.outputs.name }}-bin-macos-x64.zip
|
||||||
|
name: llama-bin-macos-x64.zip
|
||||||
|
|
||||||
|
ubuntu-22-cpu:
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
include:
|
||||||
|
- build: 'x64'
|
||||||
|
os: ubuntu-22.04
|
||||||
|
# GGML_BACKEND_DL and GGML_CPU_ALL_VARIANTS are not currently supported on arm
|
||||||
|
# - build: 'arm64'
|
||||||
|
# os: ubuntu-22.04-arm
|
||||||
|
|
||||||
|
runs-on: ${{ matrix.os }}
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Clone
|
||||||
|
id: checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: ccache
|
||||||
|
uses: hendrikmuhs/ccache-action@v1.2.16
|
||||||
|
with:
|
||||||
|
key: ubuntu-cpu-cmake
|
||||||
|
evict-old-files: 1d
|
||||||
|
|
||||||
|
- name: Dependencies
|
||||||
|
id: depends
|
||||||
|
run: |
|
||||||
|
sudo apt-get update
|
||||||
|
sudo apt-get install build-essential libcurl4-openssl-dev
|
||||||
|
|
||||||
|
- name: Build
|
||||||
|
id: cmake_build
|
||||||
|
run: |
|
||||||
|
cmake -B build \
|
||||||
|
-DGGML_BACKEND_DL=ON \
|
||||||
|
-DGGML_NATIVE=OFF \
|
||||||
|
-DGGML_CPU_ALL_VARIANTS=ON \
|
||||||
|
-DLLAMA_FATAL_WARNINGS=ON \
|
||||||
|
${{ env.CMAKE_ARGS }}
|
||||||
|
cmake --build build --config Release -j $(nproc)
|
||||||
|
|
||||||
|
- name: Determine tag name
|
||||||
|
id: tag
|
||||||
|
uses: ./.github/actions/get-tag-name
|
||||||
|
|
||||||
|
- name: Pack artifacts
|
||||||
|
id: pack_artifacts
|
||||||
|
run: |
|
||||||
|
cp LICENSE ./build/bin/
|
||||||
|
zip -r llama-${{ steps.tag.outputs.name }}-bin-ubuntu-${{ matrix.build }}.zip ./build/bin/*
|
||||||
|
|
||||||
|
- name: Upload artifacts
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
path: llama-${{ steps.tag.outputs.name }}-bin-ubuntu-${{ matrix.build }}.zip
|
||||||
|
name: llama-bin-ubuntu-${{ matrix.build }}.zip
|
||||||
|
|
||||||
|
ubuntu-22-vulkan:
|
||||||
|
runs-on: ubuntu-22.04
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Clone
|
||||||
|
id: checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: ccache
|
||||||
|
uses: hendrikmuhs/ccache-action@v1.2.16
|
||||||
|
with:
|
||||||
|
key: ubuntu-22-cmake-vulkan
|
||||||
|
evict-old-files: 1d
|
||||||
|
|
||||||
|
- name: Dependencies
|
||||||
|
id: depends
|
||||||
|
run: |
|
||||||
|
wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | sudo apt-key add -
|
||||||
|
sudo wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list
|
||||||
|
sudo apt-get update -y
|
||||||
|
sudo apt-get install -y build-essential mesa-vulkan-drivers vulkan-sdk libcurl4-openssl-dev
|
||||||
|
|
||||||
|
- name: Build
|
||||||
|
id: cmake_build
|
||||||
|
run: |
|
||||||
|
cmake -B build \
|
||||||
|
-DGGML_BACKEND_DL=ON \
|
||||||
|
-DGGML_NATIVE=OFF \
|
||||||
|
-DGGML_CPU_ALL_VARIANTS=ON \
|
||||||
|
-DGGML_VULKAN=ON \
|
||||||
|
${{ env.CMAKE_ARGS }}
|
||||||
|
cmake --build build --config Release -j $(nproc)
|
||||||
|
|
||||||
|
- name: Determine tag name
|
||||||
|
id: tag
|
||||||
|
uses: ./.github/actions/get-tag-name
|
||||||
|
|
||||||
|
- name: Pack artifacts
|
||||||
|
id: pack_artifacts
|
||||||
|
run: |
|
||||||
|
cp LICENSE ./build/bin/
|
||||||
|
zip -r llama-${{ steps.tag.outputs.name }}-bin-ubuntu-vulkan-x64.zip ./build/bin/*
|
||||||
|
|
||||||
|
- name: Upload artifacts
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
path: llama-${{ steps.tag.outputs.name }}-bin-ubuntu-vulkan-x64.zip
|
||||||
|
name: llama-bin-ubuntu-vulkan-x64.zip
|
||||||
|
|
||||||
|
  windows-cpu:
    runs-on: windows-latest

    strategy:
      matrix:
        include:
          - arch: 'x64'
          - arch: 'arm64'

    steps:
      - name: Clone
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: ccache
        uses: hendrikmuhs/ccache-action@v1.2.16
        with:
          key: windows-latest-cmake-cpu-${{ matrix.arch }}
          variant: ccache
          evict-old-files: 1d

      - name: Install Ninja
        run: |
          choco install ninja

      - name: libCURL
        id: get_libcurl
        uses: ./.github/actions/windows-setup-curl
        with:
          architecture: ${{ matrix.arch == 'x64' && 'win64' || 'win64a' }}

      - name: Build
        shell: cmd
        env:
          CURL_PATH: ${{ steps.get_libcurl.outputs.curl_path }}
        run: |
          call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" ${{ matrix.arch }}
          cmake -S . -B build -G "Ninja Multi-Config" ^
            -D CMAKE_TOOLCHAIN_FILE=cmake/${{ matrix.arch }}-windows-llvm.cmake ^
            -DGGML_NATIVE=OFF ^
            -DGGML_BACKEND_DL=ON ^
            -DGGML_CPU_ALL_VARIANTS=${{ matrix.arch == 'x64' && 'ON' || 'OFF' }} ^
            -DGGML_OPENMP=ON ^
            -DCURL_LIBRARY="%CURL_PATH%/lib/libcurl.dll.a" -DCURL_INCLUDE_DIR="%CURL_PATH%/include" ^
            ${{ env.CMAKE_ARGS }}
          cmake --build build --config Release

      - name: Pack artifacts
        id: pack_artifacts
        env:
          CURL_PATH: ${{ steps.get_libcurl.outputs.curl_path }}
        run: |
          Copy-Item $env:CURL_PATH\bin\libcurl-${{ matrix.arch }}.dll .\build\bin\Release\
          Copy-Item "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Redist\MSVC\14.42.34433\debug_nonredist\${{ matrix.arch }}\Microsoft.VC143.OpenMP.LLVM\libomp140.${{ matrix.arch == 'x64' && 'x86_64' || 'aarch64' }}.dll" .\build\bin\Release\
          7z a llama-bin-win-cpu-${{ matrix.arch }}.zip .\build\bin\Release\*

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-bin-win-cpu-${{ matrix.arch }}.zip
          name: llama-bin-win-cpu-${{ matrix.arch }}.zip
  windows:
    runs-on: windows-latest

    env:
      OPENBLAS_VERSION: 0.3.23
      VULKAN_VERSION: 1.4.309.0

    strategy:
      matrix:
        include:
          - backend: 'vulkan'
            arch: 'x64'
            defines: '-DGGML_VULKAN=ON'
            target: 'ggml-vulkan'
          - backend: 'opencl-adreno'
            arch: 'arm64'
            defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DCMAKE_PREFIX_PATH="$env:RUNNER_TEMP/opencl-arm64-release" -DGGML_OPENCL=ON -DGGML_OPENCL_USE_ADRENO_KERNELS=ON'
            target: 'ggml-opencl'

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4

      - name: ccache
        uses: hendrikmuhs/ccache-action@v1.2.16
        with:
          key: windows-latest-cmake-${{ matrix.backend }}-${{ matrix.arch }}
          variant: ccache
          evict-old-files: 1d

      - name: Install Vulkan SDK
        id: get_vulkan
        if: ${{ matrix.backend == 'vulkan' }}
        run: |
          curl.exe -o $env:RUNNER_TEMP/VulkanSDK-Installer.exe -L "https://sdk.lunarg.com/sdk/download/${env:VULKAN_VERSION}/windows/VulkanSDK-${env:VULKAN_VERSION}-Installer.exe"
          & "$env:RUNNER_TEMP\VulkanSDK-Installer.exe" --accept-licenses --default-answer --confirm-command install
          Add-Content $env:GITHUB_ENV "VULKAN_SDK=C:\VulkanSDK\${env:VULKAN_VERSION}"
          Add-Content $env:GITHUB_PATH "C:\VulkanSDK\${env:VULKAN_VERSION}\bin"

      - name: Install Ninja
        id: install_ninja
        run: |
          choco install ninja

      - name: Install OpenCL Headers and Libs
        id: install_opencl
        if: ${{ matrix.backend == 'opencl-adreno' && matrix.arch == 'arm64' }}
        run: |
          git clone https://github.com/KhronosGroup/OpenCL-Headers
          cd OpenCL-Headers
          cmake -B build `
            -DBUILD_TESTING=OFF `
            -DOPENCL_HEADERS_BUILD_TESTING=OFF `
            -DOPENCL_HEADERS_BUILD_CXX_TESTS=OFF `
            -DCMAKE_INSTALL_PREFIX="$env:RUNNER_TEMP/opencl-arm64-release"
          cmake --build build --target install
          git clone https://github.com/KhronosGroup/OpenCL-ICD-Loader
          cd OpenCL-ICD-Loader
          cmake -B build-arm64-release `
            -A arm64 `
            -DCMAKE_PREFIX_PATH="$env:RUNNER_TEMP/opencl-arm64-release" `
            -DCMAKE_INSTALL_PREFIX="$env:RUNNER_TEMP/opencl-arm64-release"
          cmake --build build-arm64-release --target install --config release

      - name: Build
        id: cmake_build
        run: |
          cmake -S . -B build ${{ matrix.defines }} -DGGML_NATIVE=OFF -DGGML_CPU=OFF -DGGML_BACKEND_DL=ON -DLLAMA_CURL=OFF
          cmake --build build --config Release --target ${{ matrix.target }}

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          7z a llama-bin-win-${{ matrix.backend }}-${{ matrix.arch }}.zip .\build\bin\Release\${{ matrix.target }}.dll

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-bin-win-${{ matrix.backend }}-${{ matrix.arch }}.zip
          name: llama-bin-win-${{ matrix.backend }}-${{ matrix.arch }}.zip
  windows-cuda:
    runs-on: windows-2022

    strategy:
      matrix:
        cuda: ['12.4']

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4

      - name: Install ccache
        uses: hendrikmuhs/ccache-action@v1.2.16
        with:
          key: windows-cuda-${{ matrix.cuda }}
          variant: ccache
          evict-old-files: 1d

      - name: Install Cuda Toolkit
        uses: ./.github/actions/windows-setup-cuda
        with:
          cuda_version: ${{ matrix.cuda }}

      - name: Install Ninja
        id: install_ninja
        run: |
          choco install ninja

      - name: Build
        id: cmake_build
        shell: cmd
        run: |
          call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" x64
          cmake -S . -B build -G "Ninja Multi-Config" ^
            -DGGML_BACKEND_DL=ON ^
            -DGGML_NATIVE=OFF ^
            -DGGML_CPU=OFF ^
            -DGGML_CUDA=ON ^
            -DLLAMA_CURL=OFF
          set /A NINJA_JOBS=%NUMBER_OF_PROCESSORS%-1
          cmake --build build --config Release -j %NINJA_JOBS% --target ggml-cuda

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          7z a llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip .\build\bin\Release\ggml-cuda.dll

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip
          name: llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip

      - name: Copy and pack Cuda runtime
        run: |
          echo "Cuda install location: ${{ env.CUDA_PATH }}"
          $dst='.\build\bin\cudart\'
          robocopy "${{env.CUDA_PATH}}\bin" $dst cudart64_*.dll cublas64_*.dll cublasLt64_*.dll
          robocopy "${{env.CUDA_PATH}}\lib" $dst cudart64_*.dll cublas64_*.dll cublasLt64_*.dll
          7z a cudart-llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip $dst\*

      - name: Upload Cuda runtime
        uses: actions/upload-artifact@v4
        with:
          path: cudart-llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip
          name: cudart-llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip
  windows-sycl:
    runs-on: windows-latest

    defaults:
      run:
        shell: bash

    env:
      WINDOWS_BASEKIT_URL: https://registrationcenter-download.intel.com/akdlm/IRC_NAS/7cd9bba0-7aab-4e30-b3ae-2221006a4a05/intel-oneapi-base-toolkit-2025.1.1.34_offline.exe
      WINDOWS_DPCPP_MKL: intel.oneapi.win.cpp-dpcpp-common:intel.oneapi.win.mkl.devel:intel.oneapi.win.dnnl:intel.oneapi.win.tbb.devel
      ONEAPI_ROOT: "C:/Program Files (x86)/Intel/oneAPI"

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4

      - name: ccache
        uses: hendrikmuhs/ccache-action@v1.2.16
        with:
          key: windows-latest-cmake-sycl
          variant: ccache
          evict-old-files: 1d

      - name: Install
        run: |
          scripts/install-oneapi.bat $WINDOWS_BASEKIT_URL $WINDOWS_DPCPP_MKL

      - name: Build
        id: cmake_build
        shell: cmd
        run: |
          call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat" intel64 --force
          cmake -G "Ninja" -B build ^
            -DCMAKE_C_COMPILER=cl -DCMAKE_CXX_COMPILER=icx ^
            -DCMAKE_BUILD_TYPE=Release ^
            -DGGML_BACKEND_DL=ON -DBUILD_SHARED_LIBS=ON ^
            -DGGML_CPU=OFF -DGGML_SYCL=ON ^
            -DLLAMA_CURL=OFF
          cmake --build build --target ggml-sycl -j

      - name: Build the release package
        id: pack_artifacts
        run: |
          echo "cp oneAPI running time dll files in ${{ env.ONEAPI_ROOT }} to ./build/bin"

          cp "${{ env.ONEAPI_ROOT }}/mkl/latest/bin/mkl_sycl_blas.5.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/mkl/latest/bin/mkl_core.2.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/mkl/latest/bin/mkl_tbb_thread.2.dll" ./build/bin

          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_adapter_level_zero.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_adapter_opencl.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_loader.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_win_proxy_loader.dll" ./build/bin

          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/sycl8.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/svml_dispmd.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/libmmd.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/libiomp5md.dll" ./build/bin

          cp "${{ env.ONEAPI_ROOT }}/dnnl/latest/bin/dnnl.dll" ./build/bin
          cp "${{ env.ONEAPI_ROOT }}/tbb/latest/bin/tbb12.dll" ./build/bin

          echo "cp oneAPI running time dll files to ./build/bin done"

          7z a llama-bin-win-sycl-x64.zip ./build/bin/*

      - name: Upload the release package
        uses: actions/upload-artifact@v4
        with:
          path: llama-bin-win-sycl-x64.zip
          name: llama-bin-win-sycl-x64.zip
  windows-hip:
    runs-on: windows-latest

    strategy:
      matrix:
        include:
          - name: "radeon"
            gpu_targets: "gfx1100;gfx1101;gfx1102;gfx1030;gfx1031;gfx1032"

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4

      - name: Clone rocWMMA repository
        id: clone_rocwmma
        run: |
          git clone https://github.com/rocm/rocwmma --branch rocm-6.2.4 --depth 1

      - name: ccache
        uses: hendrikmuhs/ccache-action@v1.2.16
        with:
          key: windows-latest-cmake-hip-${{ matrix.name }}-x64
          evict-old-files: 1d

      - name: Install
        id: depends
        run: |
          $ErrorActionPreference = "Stop"
          write-host "Downloading AMD HIP SDK Installer"
          Invoke-WebRequest -Uri "https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-24.Q3-WinSvr2022-For-HIP.exe" -OutFile "${env:RUNNER_TEMP}\rocm-install.exe"
          write-host "Installing AMD HIP SDK"
          Start-Process "${env:RUNNER_TEMP}\rocm-install.exe" -ArgumentList '-install' -NoNewWindow -Wait
          write-host "Completed AMD HIP SDK installation"

      - name: Verify ROCm
        id: verify
        run: |
          & 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' --version

      - name: Build
        id: cmake_build
        run: |
          $env:HIP_PATH=$(Resolve-Path 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' | split-path | split-path)
          $env:CMAKE_PREFIX_PATH="${env:HIP_PATH}"
          cmake -G "Unix Makefiles" -B build -S . `
            -DCMAKE_C_COMPILER="${env:HIP_PATH}\bin\clang.exe" `
            -DCMAKE_CXX_COMPILER="${env:HIP_PATH}\bin\clang++.exe" `
            -DCMAKE_CXX_FLAGS="-I$($PWD.Path.Replace('\', '/'))/rocwmma/library/include/ -Wno-ignored-attributes -Wno-nested-anon-types" `
            -DCMAKE_BUILD_TYPE=Release `
            -DGGML_BACKEND_DL=ON `
            -DGGML_NATIVE=OFF `
            -DGGML_CPU=OFF `
            -DAMDGPU_TARGETS="${{ matrix.gpu_targets }}" `
            -DGGML_HIP_ROCWMMA_FATTN=ON `
            -DGGML_HIP=ON `
            -DLLAMA_CURL=OFF
          cmake --build build --target ggml-hip -j ${env:NUMBER_OF_PROCESSORS}
          md "build\bin\rocblas\library\"
          cp "${env:HIP_PATH}\bin\hipblas.dll" "build\bin\"
          cp "${env:HIP_PATH}\bin\rocblas.dll" "build\bin\"
          cp "${env:HIP_PATH}\bin\rocblas\library\*" "build\bin\rocblas\library\"

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          7z a llama-bin-win-hip-${{ matrix.name }}-x64.zip .\build\bin\*

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-bin-win-hip-${{ matrix.name }}-x64.zip
          name: llama-bin-win-hip-${{ matrix.name }}-x64.zip
  ios-xcode-build:
    runs-on: macos-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Build
        id: cmake_build
        run: |
          sysctl -a
          cmake -B build -G Xcode \
            -DGGML_METAL_USE_BF16=ON \
            -DGGML_METAL_EMBED_LIBRARY=ON \
            -DLLAMA_CURL=OFF \
            -DLLAMA_BUILD_EXAMPLES=OFF \
            -DLLAMA_BUILD_TOOLS=OFF \
            -DLLAMA_BUILD_TESTS=OFF \
            -DLLAMA_BUILD_SERVER=OFF \
            -DCMAKE_SYSTEM_NAME=iOS \
            -DCMAKE_OSX_DEPLOYMENT_TARGET=14.0 \
            -DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=ggml
          cmake --build build --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO

      - name: xcodebuild for swift package
        id: xcodebuild
        run: |
          ./build-xcframework.sh

      - name: Build Xcode project
        run: xcodebuild -project examples/llama.swiftui/llama.swiftui.xcodeproj -scheme llama.swiftui -sdk iphoneos CODE_SIGNING_REQUIRED=NO CODE_SIGN_IDENTITY= -destination 'generic/platform=iOS' FRAMEWORK_FOLDER_PATH=./build-ios build

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Pack artifacts
        id: pack_artifacts
        run: |
          zip --symlinks -r llama-${{ steps.tag.outputs.name }}-xcframework.zip build-apple/llama.xcframework

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          path: llama-${{ steps.tag.outputs.name }}-xcframework.zip
          name: llama-${{ steps.tag.outputs.name }}-xcframework
  release:
    if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}

    # Fine-grant permission
    # https://docs.github.com/en/actions/security-for-github-actions/security-guides/automatic-token-authentication#modifying-the-permissions-for-the-github_token
    permissions:
      contents: write # for creating release

    runs-on: ubuntu-latest

    needs:
      - windows
      - windows-cpu
      - windows-cuda
      - windows-sycl
      - windows-hip
      - ubuntu-22-cpu
      - ubuntu-22-vulkan
      - macOS-arm64
      - macOS-x64
      - ios-xcode-build

    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Determine tag name
        id: tag
        uses: ./.github/actions/get-tag-name

      - name: Download artifacts
        id: download-artifact
        uses: actions/download-artifact@v4
        with:
          path: ./artifact
          merge-multiple: true

      - name: Move artifacts
        id: move_artifacts
        run: |
          mkdir -p release

          echo "Adding CPU backend files to existing zips..."
          for arch in x64 arm64; do
            cpu_zip="artifact/llama-bin-win-cpu-${arch}.zip"
            temp_dir=$(mktemp -d)
            echo "Extracting CPU backend for $arch..."
            unzip "$cpu_zip" -d "$temp_dir"

            echo "Adding CPU files to $arch zips..."
            for target_zip in artifact/llama-bin-win-*-${arch}.zip; do
              if [[ "$target_zip" == "$cpu_zip" ]]; then
                continue
              fi
              echo "Adding CPU backend to $(basename "$target_zip")"
              realpath_target_zip=$(realpath "$target_zip")
              (cd "$temp_dir" && zip -r "$realpath_target_zip" .)
            done

            rm -rf "$temp_dir"
          done

          echo "Renaming and moving zips to release..."
          for zip_file in artifact/llama-bin-win-*.zip; do
            base_name=$(basename "$zip_file" .zip)
            zip_name="llama-${{ steps.tag.outputs.name }}-${base_name#llama-}.zip"
            echo "Moving $zip_file to release/$zip_name"
            mv "$zip_file" "release/$zip_name"
          done

          echo "Moving other artifacts..."
          mv -v artifact/*.zip release

      - name: Create release
        id: create_release
        uses: ggml-org/action-create-release@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          tag_name: ${{ steps.tag.outputs.name }}

      - name: Upload release
        id: upload_release
        uses: actions/github-script@v3
        with:
          github-token: ${{secrets.GITHUB_TOKEN}}
          script: |
            const path = require('path');
            const fs = require('fs');
            const release_id = '${{ steps.create_release.outputs.id }}';
            for (let file of await fs.readdirSync('./release')) {
              if (path.extname(file) === '.zip') {
                console.log('uploadReleaseAsset', file);
                await github.repos.uploadReleaseAsset({
                  owner: context.repo.owner,
                  repo: context.repo.repo,
                  release_id: release_id,
                  name: file,
                  data: await fs.readFileSync(`./release/${file}`)
                });
              }
            }
.github/workflows/server.yml (vendored): 26 changed lines

@@ -15,10 +15,10 @@ on:
   push:
     branches:
       - master
-    paths: ['.github/workflows/server.yml', '**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu', '**/*.swift', '**/*.m', 'examples/server/**.*']
+    paths: ['.github/workflows/server.yml', '**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu', '**/*.swift', '**/*.m', 'tools/server/**.*']
   pull_request:
     types: [opened, synchronize, reopened]
-    paths: ['.github/workflows/server.yml', '**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu', '**/*.swift', '**/*.m', 'examples/server/**.*']
+    paths: ['.github/workflows/server.yml', '**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu', '**/*.swift', '**/*.m', 'tools/server/**.*']

 env:
   LLAMA_LOG_COLORS: 1
@@ -74,7 +74,7 @@ jobs:
       - name: Tests dependencies
         id: test_dependencies
         run: |
-          pip install -r examples/server/tests/requirements.txt
+          pip install -r tools/server/tests/requirements.txt

       # Setup nodejs (to be used for verifying bundled index.html)
       - uses: actions/setup-node@v4
@@ -84,14 +84,14 @@ jobs:
       - name: WebUI - Install dependencies
         id: webui_lint
         run: |
-          cd examples/server/webui
+          cd tools/server/webui
           npm ci

       - name: WebUI - Check code format
         id: webui_format
         run: |
           git config --global --add safe.directory $(realpath .)
-          cd examples/server/webui
+          cd tools/server/webui
           git status

           npm run format
@@ -108,7 +108,7 @@ jobs:
         id: verify_server_index_html
         run: |
           git config --global --add safe.directory $(realpath .)
-          cd examples/server/webui
+          cd tools/server/webui
           git status

           npm run build
@@ -161,26 +161,26 @@ jobs:
         env:
           GITHUB_ACTIONS: "true"
         run: |
-          cd examples/server/tests
+          cd tools/server/tests
           ./tests.sh

       - name: Tests (sanitizers)
         id: server_integration_tests_sanitizers
         if: ${{ matrix.sanitizer != '' }}
         run: |
-          cd examples/server/tests
+          cd tools/server/tests
           LLAMA_SANITIZE=1 ./tests.sh

       - name: Slow tests
         id: server_integration_tests_slow
         if: ${{ (github.event.schedule || github.event.inputs.slow_tests == 'true') && matrix.build_type == 'Release' }}
         run: |
-          cd examples/server/tests
+          cd tools/server/tests
           SLOW_TESTS=1 ./tests.sh


   server-windows:
-    runs-on: windows-2019
+    runs-on: windows-2022

     steps:
       - name: Clone
@@ -211,7 +211,7 @@ jobs:
       - name: Tests dependencies
         id: test_dependencies
         run: |
-          pip install -r examples/server/tests/requirements.txt
+          pip install -r tools/server/tests/requirements.txt

       - name: Copy Libcurl
         id: prepare_libcurl
@@ -224,7 +224,7 @@ jobs:
         id: server_integration_tests
         if: ${{ !matrix.disabled_on_pr || !github.event.pull_request }}
         run: |
-          cd examples/server/tests
+          cd tools/server/tests
           $env:PYTHONIOENCODING = ":replace"
           pytest -v -x -m "not slow"

@@ -232,6 +232,6 @@ jobs:
         id: server_integration_tests_slow
         if: ${{ (github.event.schedule || github.event.inputs.slow_tests == 'true') && matrix.build_type == 'Release' }}
         run: |
-          cd examples/server/tests
+          cd tools/server/tests
           $env:SLOW_TESTS = "1"
           pytest -v -x

.github/workflows/winget.yml (vendored, new file): 42 lines

@@ -0,0 +1,42 @@
+name: Update Winget Package
+
+on:
+  workflow_dispatch: # allows manual triggering
+  schedule:
+    - cron: '28 5 * * *' # Update every day at 5:28 UTC
+
+jobs:
+  update:
+    name: Update Winget Package
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Install cargo binstall
+        uses: cargo-bins/cargo-binstall@268643a6b5ea099f5718ee5cd3ff7dc89a5eb49b
+
+      - name: Install komac
+        run: |
+          cargo binstall komac@2.11.2 -y
+
+      - name: Find latest release
+        id: find_latest_release
+        uses: actions/github-script@v6
+        with:
+          script: |
+            const { data: releases } = await github.rest.repos.listReleases({
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+            });
+            console.log("Latest release:", releases[0].tag_name);
+            return releases[0].tag_name;
+
+      - name: Update manifest
+        env:
+          VERSION: ${{ steps.find_latest_release.outputs.result }}
+        run: |
+          echo "Updating manifest..."
+          komac update --version ${{ env.VERSION }} \
+            --urls "https://github.com/ggml-org/llama.cpp/releases/download/${{ env.VERSION }}/llama-${{ env.VERSION }}-bin-win-vulkan-x64.zip" \
+            --token ${{ secrets.WINGET_GITHUB_TOKEN }} \
+            --submit \
+            ggml.llamacpp

.gitignore (vendored): 12 changed lines

@@ -96,11 +96,11 @@ perf-*.txt
 # Examples

 examples/jeopardy/results.txt
-examples/server/*.css.hpp
-examples/server/*.html.hpp
-examples/server/*.js.hpp
-examples/server/*.mjs.hpp
-examples/server/*.gz.hpp
+tools/server/*.css.hpp
+tools/server/*.html.hpp
+tools/server/*.js.hpp
+tools/server/*.mjs.hpp
+tools/server/*.gz.hpp
 !build_64.sh
 !examples/*.bat
 !examples/*/*.kts
@@ -110,7 +110,7 @@ examples/server/*.gz.hpp

 # Server Web UI temporary files
 node_modules
-examples/server/webui/dist
+tools/server/webui/dist

 # Python

CMakeLists.txt

@@ -77,6 +77,7 @@ option(LLAMA_BUILD_COMMON "llama: build common utils library" ${LLAMA_STANDALONE})

 # extra artifacts
 option(LLAMA_BUILD_TESTS "llama: build tests" ${LLAMA_STANDALONE})
+option(LLAMA_BUILD_TOOLS "llama: build tools" ${LLAMA_STANDALONE})
 option(LLAMA_BUILD_EXAMPLES "llama: build examples" ${LLAMA_STANDALONE})
 option(LLAMA_BUILD_SERVER "llama: build server example" ${LLAMA_STANDALONE})

@@ -88,6 +89,14 @@ option(LLAMA_LLGUIDANCE "llama-common: include LLGuidance library for structured
 include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/build-info.cmake)
 include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/common.cmake)

+if (NOT DEFINED LLAMA_BUILD_NUMBER)
+    set(LLAMA_BUILD_NUMBER ${BUILD_NUMBER})
+endif()
+if (NOT DEFINED LLAMA_BUILD_COMMIT)
+    set(LLAMA_BUILD_COMMIT ${BUILD_COMMIT})
+endif()
+set(LLAMA_INSTALL_VERSION 0.0.${BUILD_NUMBER})
+
 # override ggml options
 set(GGML_ALL_WARNINGS ${LLAMA_ALL_WARNINGS})
 set(GGML_FATAL_WARNINGS ${LLAMA_FATAL_WARNINGS})
@@ -154,10 +163,17 @@ if (LLAMA_USE_SYSTEM_GGML)
 endif()

 if (NOT TARGET ggml AND NOT LLAMA_USE_SYSTEM_GGML)
+    set(GGML_BUILD_NUMBER ${LLAMA_BUILD_NUMBER})
+    set(GGML_BUILD_COMMIT ${LLAMA_BUILD_COMMIT})
     add_subdirectory(ggml)
     # ... otherwise assume ggml is added by a parent CMakeLists.txt
 endif()

+if (MINGW)
+    # Target Windows 8 for PrefetchVirtualMemory
+    add_compile_definitions(_WIN32_WINNT=${GGML_WIN_VER})
+endif()
+
 #
 # build the library
 #
@@ -187,6 +203,10 @@ if (LLAMA_BUILD_COMMON AND LLAMA_BUILD_EXAMPLES)
     add_subdirectory(pocs)
 endif()

+if (LLAMA_BUILD_COMMON AND LLAMA_BUILD_TOOLS)
+    add_subdirectory(tools)
+endif()
+
 #
 # install
 #
@@ -194,10 +214,6 @@ endif()
 include(GNUInstallDirs)
 include(CMakePackageConfigHelpers)

-set(LLAMA_BUILD_NUMBER ${BUILD_NUMBER})
-set(LLAMA_BUILD_COMMIT ${BUILD_COMMIT})
-set(LLAMA_INSTALL_VERSION 0.0.${BUILD_NUMBER})
-
 set(LLAMA_INCLUDE_INSTALL_DIR ${CMAKE_INSTALL_INCLUDEDIR} CACHE PATH "Location of header files")
 set(LLAMA_LIB_INSTALL_DIR ${CMAKE_INSTALL_LIBDIR} CACHE PATH "Location of library files")
 set(LLAMA_BIN_INSTALL_DIR ${CMAKE_INSTALL_BINDIR} CACHE PATH "Location of binary files")
@@ -247,20 +263,3 @@ configure_file(cmake/llama.pc.in

 install(FILES "${CMAKE_CURRENT_BINARY_DIR}/llama.pc"
         DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig)
-
-#
-# copy the license files
-#
-
-# Check if running in GitHub Actions
-if(DEFINED ENV{GITHUB_ACTIONS} AND "$ENV{GITHUB_ACTIONS}" STREQUAL "true")
-    message(STATUS "Running inside GitHub Actions - copying license files")
-
-    # Copy all files from licenses/ to build/bin/
-    file(GLOB LICENSE_FILES "${CMAKE_SOURCE_DIR}/licenses/*")
-    foreach(LICENSE_FILE ${LICENSE_FILES})
-        get_filename_component(FILENAME ${LICENSE_FILE} NAME)
-        configure_file(${LICENSE_FILE} "${CMAKE_BINARY_DIR}/bin/${FILENAME}" COPYONLY)
-    endforeach()
-endif()
-
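The CMakeLists.txt changes above introduce a `LLAMA_BUILD_TOOLS` option and only add the new `tools/` subdirectory when `LLAMA_BUILD_COMMON` is enabled as well. As a minimal sketch of how the new switch is exercised at configure time (the build directory name and the combination of other flags are assumptions, not part of the commit):

```sh
# Minimal sketch, assuming a fresh checkout of this commit and a working CMake toolchain.
# LLAMA_BUILD_TOOLS / LLAMA_BUILD_TESTS / LLAMA_BUILD_EXAMPLES are the options declared above;
# the build directory name is arbitrary.
cmake -B build -DLLAMA_BUILD_TOOLS=ON -DLLAMA_BUILD_TESTS=OFF -DLLAMA_BUILD_EXAMPLES=OFF
cmake --build build --config Release   # tools/ is built because LLAMA_BUILD_COMMON and LLAMA_BUILD_TOOLS are both ON
```

Passing `-DLLAMA_BUILD_TOOLS=OFF` instead skips the `tools/` subdirectory entirely, which is what the ios-xcode-build job in the workflow above does.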
CMakePresets.json

@@ -38,15 +38,6 @@
             }
         },

-        {
-            "name": "arm64-windows-msvc", "hidden": true,
-            "architecture": { "value": "arm64", "strategy": "external" },
-            "toolset": { "value": "host=x64", "strategy": "external" },
-            "cacheVariables": {
-                "CMAKE_TOOLCHAIN_FILE": "${sourceDir}/cmake/arm64-windows-msvc.cmake"
-            }
-        },
-
         {
             "name": "arm64-windows-llvm", "hidden": true,
             "architecture": { "value": "arm64", "strategy": "external" },
@@ -73,10 +64,6 @@
         { "name": "arm64-apple-clang-release", "inherits": [ "base", "arm64-apple-clang", "reldbg" ] },
         { "name": "arm64-apple-clang+static-release", "inherits": [ "base", "arm64-apple-clang", "reldbg", "static" ] },

-        { "name": "arm64-windows-msvc-debug", "inherits": [ "base", "arm64-windows-msvc", "debug" ] },
-        { "name": "arm64-windows-msvc-release", "inherits": [ "base", "arm64-windows-msvc", "reldbg" ] },
-        { "name": "arm64-windows-msvc+static-release", "inherits": [ "base", "arm64-windows-msvc", "reldbg", "static" ] },
-
         { "name": "x64-windows-llvm-debug", "inherits": [ "base", "x64-windows-llvm", "debug" ] },
         { "name": "x64-windows-llvm-release", "inherits": [ "base", "x64-windows-llvm", "release" ] },
         { "name": "x64-windows-llvm-reldbg", "inherits": [ "base", "x64-windows-llvm", "reldbg" ] },
CODEOWNERS

@@ -2,7 +2,7 @@

 /ci/ @ggerganov
 /.devops/*.Dockerfile @ngxson
-/examples/server/ @ngxson
+/tools/server/ @ngxson
 /ggml/src/ggml-cuda/fattn* @JohannesGaessler
 /ggml/src/ggml-cuda/mmq.* @JohannesGaessler
 /ggml/src/ggml-cuda/mmv.* @JohannesGaessler
Makefile: 105 changed lines

@@ -367,7 +367,7 @@ ifdef LLAMA_SERVER_SSL
 endif

 ifndef GGML_NO_CPU_AARCH64
-	MK_CPPFLAGS += -DGGML_USE_CPU_AARCH64
+	MK_CPPFLAGS += -DGGML_USE_CPU_REPACK
 endif

 # warnings
@@ -780,10 +780,6 @@ ifdef GGML_HIP

 	MK_CPPFLAGS += -DGGML_USE_HIP -DGGML_USE_CUDA

-	ifdef GGML_HIP_UMA
-		MK_CPPFLAGS += -DGGML_HIP_UMA
-	endif # GGML_HIP_UMA
-
 	MK_LDFLAGS += -L$(ROCM_PATH)/lib -Wl,-rpath=$(ROCM_PATH)/lib
 	MK_LDFLAGS += -L$(ROCM_PATH)/lib64 -Wl,-rpath=$(ROCM_PATH)/lib64
 	MK_LDFLAGS += -lhipblas -lamdhip64 -lrocblas
@@ -974,7 +970,7 @@ OBJ_GGML = \
 	$(DIR_GGML)/src/ggml-threading.o \
 	$(DIR_GGML)/src/ggml-cpu/ggml-cpu.o \
 	$(DIR_GGML)/src/ggml-cpu/ggml-cpu_cpp.o \
-	$(DIR_GGML)/src/ggml-cpu/ggml-cpu-aarch64.o \
+	$(DIR_GGML)/src/ggml-cpu/repack.o \
 	$(DIR_GGML)/src/ggml-cpu/ggml-cpu-hbm.o \
 	$(DIR_GGML)/src/ggml-cpu/ggml-cpu-quants.o \
 	$(DIR_GGML)/src/ggml-cpu/ggml-cpu-traits.o \
@@ -1160,10 +1156,10 @@ $(LIB_COMMON_S): $(OBJ_COMMON)

 # Clean generated server assets
 clean-server-assets:
-	find examples/server -type f -name "*.js.hpp" -delete
-	find examples/server -type f -name "*.mjs.hpp" -delete
-	find examples/server -type f -name "*.css.hpp" -delete
-	find examples/server -type f -name "*.html.hpp" -delete
+	find tools/server -type f -name "*.js.hpp" -delete
+	find tools/server -type f -name "*.mjs.hpp" -delete
+	find tools/server -type f -name "*.css.hpp" -delete
+	find tools/server -type f -name "*.html.hpp" -delete

 # Clean rule
 clean: clean-server-assets
@@ -1183,7 +1179,7 @@ clean: clean-server-assets
 # Helper function that replaces .c, .cpp, and .cu file endings with .o:
 GET_OBJ_FILE = $(patsubst %.c,%.o,$(patsubst %.cpp,%.o,$(patsubst %.cu,%.o,$(1))))

-llama-cli: examples/main/main.cpp \
+llama-cli: tools/main/main.cpp \
 	$(OBJ_ALL)
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
@@ -1191,12 +1187,7 @@ llama-cli: examples/main/main.cpp \
 	@echo '==== Run ./llama-cli -h for help. ===='
 	@echo

-llama-infill: examples/infill/infill.cpp \
-	$(OBJ_ALL)
-	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
-	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
-
-llama-run: examples/run/run.cpp \
+llama-run: tools/run/run.cpp \
 	$(OBJ_ALL)
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
@@ -1211,7 +1202,7 @@ llama-simple-chat: examples/simple-chat/simple-chat.cpp \
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)

-llama-tokenize: examples/tokenize/tokenize.cpp \
+llama-tokenize: tools/tokenize/tokenize.cpp \
 	$(OBJ_ALL)
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
@@ -1221,27 +1212,27 @@ llama-batched: examples/batched/batched.cpp \
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)

-llama-batched-bench: examples/batched-bench/batched-bench.cpp \
+llama-batched-bench: tools/batched-bench/batched-bench.cpp \
 	$(OBJ_ALL)
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)

-llama-quantize: examples/quantize/quantize.cpp \
+llama-quantize: tools/quantize/quantize.cpp \
 	$(OBJ_ALL)
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)

-llama-quantize-stats: examples/quantize-stats/quantize-stats.cpp \
+llama-quantize-stats: tools/quantize-stats/quantize-stats.cpp \
 	$(OBJ_ALL)
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)

-llama-perplexity: examples/perplexity/perplexity.cpp \
+llama-perplexity: tools/perplexity/perplexity.cpp \
 	$(OBJ_ALL)
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)

-llama-imatrix: examples/imatrix/imatrix.cpp \
+llama-imatrix: tools/imatrix/imatrix.cpp \
 	$(OBJ_ALL)
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
@@ -1283,7 +1274,7 @@ llama-gguf-hash: examples/gguf-hash/gguf-hash.cpp examples/gguf-hash/deps/sha1/s
 	$(CXX) $(CXXFLAGS) -Iexamples/gguf-hash/deps -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)

-llama-gguf-split: examples/gguf-split/gguf-split.cpp \
+llama-gguf-split: tools/gguf-split/gguf-split.cpp \
 	$(OBJ_ALL)
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
@@ -1293,7 +1284,7 @@ llama-eval-callback: examples/eval-callback/eval-callback.cpp \
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)

-llama-cvector-generator: examples/cvector-generator/cvector-generator.cpp \
+llama-cvector-generator: tools/cvector-generator/cvector-generator.cpp \
 	$(OBJ_ALL)
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
@@ -1303,12 +1294,12 @@ llama-convert-llama2c-to-ggml: examples/convert-llama2c-to-ggml/convert-llama2c-
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)

-llama-bench: examples/llama-bench/llama-bench.cpp \
+llama-bench: tools/llama-bench/llama-bench.cpp \
 	$(OBJ_ALL)
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)

-llama-export-lora: examples/export-lora/export-lora.cpp \
+llama-export-lora: tools/export-lora/export-lora.cpp \
 	$(OBJ_ALL)
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
@@ -1364,17 +1355,17 @@ llama-gbnf-validator: examples/gbnf-validator/gbnf-validator.cpp \
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)

 ifdef GGML_RPC
-rpc-server: examples/rpc/rpc-server.cpp \
+rpc-server: tools/rpc/rpc-server.cpp \
 	$(OBJ_GGML)
 	$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
 endif # GGML_RPC

 llama-server: \
-	examples/server/server.cpp \
-	examples/server/utils.hpp \
-	examples/server/httplib.h \
-	examples/server/index.html.hpp \
-	examples/server/loading.html.hpp \
+	tools/server/server.cpp \
+	tools/server/utils.hpp \
+	tools/server/httplib.h \
+	tools/server/index.html.hpp \
+	tools/server/loading.html.hpp \
 	common/chat.cpp \
 	common/chat.h \
 	common/chat-template.hpp \
@@ -1382,10 +1373,10 @@ llama-server: \
 	common/minja.hpp \
 	$(OBJ_ALL)
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
-	$(CXX) $(CXXFLAGS) $(filter-out %.h %.hpp $<,$^) -Iexamples/server $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) $(LWINSOCK2)
+	$(CXX) $(CXXFLAGS) $(filter-out %.h %.hpp $<,$^) -Itools/server $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) $(LWINSOCK2)

-# Portable equivalent of `cd examples/server/public && xxd -i $(notdir $<) ../$(notdir $<).hpp`:
-examples/server/%.hpp: examples/server/public/% FORCE Makefile
+# Portable equivalent of `cd tools/server/public && xxd -i $(notdir $<) ../$(notdir $<).hpp`:
+tools/server/%.hpp: tools/server/public/% FORCE Makefile
 	@( export NAME=$(subst .,_,$(subst -,_,$(notdir $<))) && \
 		echo "unsigned char $${NAME}[] = {" && \
 		cat $< | od -v -t x1 -An | sed -E 's/([0-9a-fA-F]+)/0x\1, /g' && \
@@ -1398,36 +1389,36 @@ llama-gen-docs: examples/gen-docs/gen-docs.cpp \
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)

-libllava.a: examples/llava/llava.cpp \
-	examples/llava/llava.h \
-	examples/llava/clip.cpp \
-	examples/llava/clip.h \
+libllava.a: tools/mtmd/llava.cpp \
+	tools/mtmd/llava.h \
+	tools/mtmd/clip.cpp \
+	tools/mtmd/clip.h \
 	common/stb_image.h \
 	common/base64.hpp \
 	$(OBJ_ALL)
 	$(CXX) $(CXXFLAGS) -static -fPIC -c $< -o $@ -Wno-cast-qual

-llama-llava-cli: examples/llava/llava-cli.cpp \
-	examples/llava/llava.cpp \
-	examples/llava/llava.h \
-	examples/llava/clip.cpp \
-	examples/llava/clip.h \
+llama-llava-cli: tools/mtmd/llava-cli.cpp \
+	tools/mtmd/llava.cpp \
+	tools/mtmd/llava.h \
+	tools/mtmd/clip.cpp \
+	tools/mtmd/clip.h \
 	$(OBJ_ALL)
 	$(CXX) $(CXXFLAGS) $< $(filter-out %.h $<,$^) -o $@ $(LDFLAGS) -Wno-cast-qual

-llama-minicpmv-cli: examples/llava/minicpmv-cli.cpp \
-	examples/llava/llava.cpp \
-	examples/llava/llava.h \
-	examples/llava/clip.cpp \
-	examples/llava/clip.h \
+llama-minicpmv-cli: tools/mtmd/minicpmv-cli.cpp \
+	tools/mtmd/llava.cpp \
+	tools/mtmd/llava.h \
+	tools/mtmd/clip.cpp \
+	tools/mtmd/clip.h \
 	$(OBJ_ALL)
 	$(CXX) $(CXXFLAGS) $< $(filter-out %.h $<,$^) -o $@ $(LDFLAGS) -Wno-cast-qual

-llama-qwen2vl-cli: examples/llava/qwen2vl-cli.cpp \
-	examples/llava/llava.cpp \
-	examples/llava/llava.h \
-	examples/llava/clip.cpp \
-	examples/llava/clip.h \
+llama-qwen2vl-cli: tools/mtmd/qwen2vl-cli.cpp \
+	tools/mtmd/llava.cpp \
+	tools/mtmd/llava.h \
+	tools/mtmd/clip.cpp \
+	tools/mtmd/clip.h \
 	$(OBJ_ALL)
 	$(CXX) $(CXXFLAGS) $< $(filter-out %.h $<,$^) -o $@ $(LDFLAGS) -Wno-cast-qual

@@ -1484,12 +1475,12 @@ tests/test-double-float: tests/test-double-float.cpp

 tests/test-json-schema-to-grammar: tests/test-json-schema-to-grammar.cpp \
 	$(OBJ_ALL)
-	$(CXX) $(CXXFLAGS) -Iexamples/server -c $< -o $(call GET_OBJ_FILE, $<)
+	$(CXX) $(CXXFLAGS) -Itools/server -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)

 tests/test-chat: tests/test-chat.cpp \
 	$(OBJ_ALL)
-	$(CXX) $(CXXFLAGS) -Iexamples/server -c $< -o $(call GET_OBJ_FILE, $<)
+	$(CXX) $(CXXFLAGS) -Itools/server -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)

 tests/test-opt: tests/test-opt.cpp \
|
84
README.md
84
README.md
@ -3,9 +3,10 @@
|
|||||||

|

|
||||||
|
|
||||||
[](https://opensource.org/licenses/MIT)
|
[](https://opensource.org/licenses/MIT)
|
||||||
|
[](https://github.com/ggml-org/llama.cpp/releases)
|
||||||
[](https://github.com/ggml-org/llama.cpp/actions/workflows/server.yml)
|
[](https://github.com/ggml-org/llama.cpp/actions/workflows/server.yml)
|
||||||
|
|
||||||
[Roadmap](https://github.com/users/ggerganov/projects/7) / [Project status](https://github.com/ggml-org/llama.cpp/discussions/3471) / [Manifesto](https://github.com/ggml-org/llama.cpp/discussions/205) / [ggml](https://github.com/ggml-org/ggml)
|
[Roadmap](https://github.com/users/ggerganov/projects/7) / [Manifesto](https://github.com/ggml-org/llama.cpp/discussions/205) / [ggml](https://github.com/ggml-org/ggml)
|
||||||
|
|
||||||
Inference of Meta's [LLaMA](https://arxiv.org/abs/2302.13971) model (and others) in pure C/C++
|
Inference of Meta's [LLaMA](https://arxiv.org/abs/2302.13971) model (and others) in pure C/C++
|
||||||
|
|
||||||
@ -16,8 +17,9 @@ Inference of Meta's [LLaMA](https://arxiv.org/abs/2302.13971) model (and others)
|
|||||||
|
|
||||||
## Hot topics
|
## Hot topics
|
||||||
|
|
||||||
- **How to use [MTLResidencySet](https://developer.apple.com/documentation/metal/mtlresidencyset?language=objc) to keep the GPU memory active?** https://github.com/ggml-org/llama.cpp/pull/11427
|
- 🔥 Multimodal support arrived in `llama-server`: [#12898](https://github.com/ggml-org/llama.cpp/pull/12898) | [documentation](./docs/multimodal.md)
|
||||||
- **VS Code extension for FIM completions:** https://github.com/ggml-org/llama.vscode
|
- A new binary `llama-mtmd-cli` is introduced to replace `llava-cli`, `minicpmv-cli`, `gemma3-cli` ([#13012](https://github.com/ggml-org/llama.cpp/pull/13012)) and `qwen2vl-cli` ([#13141](https://github.com/ggml-org/llama.cpp/pull/13141)), `libllava` will be deprecated
|
||||||
|
- VS Code extension for FIM completions: https://github.com/ggml-org/llama.vscode
|
||||||
- Universal [tool call support](./docs/function-calling.md) in `llama-server` https://github.com/ggml-org/llama.cpp/pull/9639
|
- Universal [tool call support](./docs/function-calling.md) in `llama-server` https://github.com/ggml-org/llama.cpp/pull/9639
|
||||||
- Vim/Neovim plugin for FIM completions: https://github.com/ggml-org/llama.vim
|
- Vim/Neovim plugin for FIM completions: https://github.com/ggml-org/llama.vim
|
||||||
- Introducing GGUF-my-LoRA https://github.com/ggml-org/llama.cpp/discussions/10123
|
- Introducing GGUF-my-LoRA https://github.com/ggml-org/llama.cpp/discussions/10123
|
||||||
@ -26,6 +28,30 @@ Inference of Meta's [LLaMA](https://arxiv.org/abs/2302.13971) model (and others)
|
|||||||
|
|
||||||
----
|
----
|
||||||
|
|
||||||
|
## Quick start
|
||||||
|
|
||||||
|
Getting started with llama.cpp is straightforward. Here are several ways to install it on your machine:
|
||||||
|
|
||||||
|
- Install `llama.cpp` using [brew, nix or winget](docs/install.md)
|
||||||
|
- Run with Docker - see our [Docker documentation](docs/docker.md)
|
||||||
|
- Download pre-built binaries from the [releases page](https://github.com/ggml-org/llama.cpp/releases)
|
||||||
|
- Build from source by cloning this repository - check out [our build guide](docs/build.md)
|
||||||
|
|
||||||
|
Once installed, you'll need a model to work with. Head to the [Obtaining and quantizing models](#obtaining-and-quantizing-models) section to learn more.
|
||||||
|
|
||||||
|
Example command:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
# Use a local model file
|
||||||
|
llama-cli -m my_model.gguf
|
||||||
|
|
||||||
|
# Or download and run a model directly from Hugging Face
|
||||||
|
llama-cli -hf ggml-org/gemma-3-1b-it-GGUF
|
||||||
|
|
||||||
|
# Launch OpenAI-compatible API server
|
||||||
|
llama-server -hf ggml-org/gemma-3-1b-it-GGUF
|
||||||
|
```
|
||||||
|
|
||||||
## Description
|
## Description
|
||||||
|
|
||||||
The main goal of `llama.cpp` is to enable LLM inference with minimal setup and state-of-the-art performance on a wide
|
The main goal of `llama.cpp` is to enable LLM inference with minimal setup and state-of-the-art performance on a wide
|
||||||
@ -35,7 +61,7 @@ range of hardware - locally and in the cloud.
|
|||||||
- Apple silicon is a first-class citizen - optimized via ARM NEON, Accelerate and Metal frameworks
|
- Apple silicon is a first-class citizen - optimized via ARM NEON, Accelerate and Metal frameworks
|
||||||
- AVX, AVX2, AVX512 and AMX support for x86 architectures
|
- AVX, AVX2, AVX512 and AMX support for x86 architectures
|
||||||
- 1.5-bit, 2-bit, 3-bit, 4-bit, 5-bit, 6-bit, and 8-bit integer quantization for faster inference and reduced memory use
|
- 1.5-bit, 2-bit, 3-bit, 4-bit, 5-bit, 6-bit, and 8-bit integer quantization for faster inference and reduced memory use
|
||||||
- Custom CUDA kernels for running LLMs on NVIDIA GPUs (support for AMD GPUs via HIP and Moore Threads MTT GPUs via MUSA)
|
- Custom CUDA kernels for running LLMs on NVIDIA GPUs (support for AMD GPUs via HIP and Moore Threads GPUs via MUSA)
|
||||||
- Vulkan and SYCL backend support
|
- Vulkan and SYCL backend support
|
||||||
- CPU+GPU hybrid inference to partially accelerate models larger than the total VRAM capacity
|
- CPU+GPU hybrid inference to partially accelerate models larger than the total VRAM capacity
|
||||||
|
|
||||||
@ -128,6 +154,7 @@ Instructions for adding support for new models: [HOWTO-add-model.md](docs/develo

<details>
<summary>Bindings</summary>

- Python: [ddh0/easy-llama](https://github.com/ddh0/easy-llama)
- Python: [abetlen/llama-cpp-python](https://github.com/abetlen/llama-cpp-python)
- Go: [go-skynet/go-llama.cpp](https://github.com/go-skynet/go-llama.cpp)
- Node.js: [withcatai/node-llama-cpp](https://github.com/withcatai/node-llama-cpp)
@ -227,6 +254,7 @@ Instructions for adding support for new models: [HOWTO-add-model.md](docs/develo

</details>

## Supported backends

| Backend | Target devices |
@ -235,23 +263,13 @@ Instructions for adding support for new models: [HOWTO-add-model.md](docs/develo

| [BLAS](docs/build.md#blas-build) | All |
| [BLIS](docs/backend/BLIS.md) | All |
| [SYCL](docs/backend/SYCL.md) | Intel and Nvidia GPU |
| [MUSA](docs/build.md#musa) | Moore Threads MTT GPU |
| [MUSA](docs/build.md#musa) | Moore Threads GPU |
| [CUDA](docs/build.md#cuda) | Nvidia GPU |
| [HIP](docs/build.md#hip) | AMD GPU |
| [Vulkan](docs/build.md#vulkan) | GPU |
| [CANN](docs/build.md#cann) | Ascend NPU |
| [OpenCL](docs/backend/OPENCL.md) | Adreno GPU |
| [RPC](https://github.com/ggml-org/llama.cpp/tree/master/examples/rpc) | All |
| [RPC](https://github.com/ggml-org/llama.cpp/tree/master/tools/rpc) | All |

## Building the project

The main product of this project is the `llama` library. Its C-style interface can be found in [include/llama.h](include/llama.h).
The project also includes many example programs and tools using the `llama` library. The examples range from simple, minimal code snippets to sophisticated sub-projects such as an OpenAI-compatible HTTP server. Possible methods for obtaining the binaries:

- Clone this repository and build locally, see [how to build](docs/build.md)
- On MacOS or Linux, install `llama.cpp` via [brew, flox or nix](docs/install.md)
- Use a Docker image, see [documentation for Docker](docs/docker.md)
- Download pre-built binaries from [releases](https://github.com/ggml-org/llama.cpp/releases)

## Obtaining and quantizing models

@ -260,7 +278,11 @@ The [Hugging Face](https://huggingface.co) platform hosts a [number of LLMs](htt

- [Trending](https://huggingface.co/models?library=gguf&sort=trending)
- [LLaMA](https://huggingface.co/models?sort=trending&search=llama+gguf)

You can either manually download the GGUF file or directly use any `llama.cpp`-compatible models from [Hugging Face](https://huggingface.co/) or other model hosting sites, such as [ModelScope](https://modelscope.cn/), by using this CLI argument: `-hf <user>/<model>[:quant]`. For example:

```sh
llama-cli -hf ggml-org/gemma-3-1b-it-GGUF
```

By default, the CLI downloads from Hugging Face; you can switch to other endpoints with the environment variable `MODEL_ENDPOINT`. For example, you may opt to download model checkpoints from ModelScope or another model-sharing community by setting `MODEL_ENDPOINT=https://www.modelscope.cn/`.

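As an illustration, a minimal sketch of switching endpoints for a single invocation (the `<user>/<model>[:quant]` placeholder stands for whichever GGUF repository you want to pull; the endpoint value is the one mentioned above):

```sh
# Download the model from ModelScope instead of Hugging Face for this run
MODEL_ENDPOINT=https://www.modelscope.cn/ llama-cli -hf <user>/<model>[:quant]
```
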
@ -275,9 +297,9 @@ The Hugging Face platform provides a variety of online tools for converting, qua

- Use the [GGUF-editor space](https://huggingface.co/spaces/CISCai/gguf-editor) to edit GGUF meta data in the browser (more info: https://github.com/ggml-org/llama.cpp/discussions/9268)
- Use the [Inference Endpoints](https://ui.endpoints.huggingface.co/) to directly host `llama.cpp` in the cloud (more info: https://github.com/ggml-org/llama.cpp/discussions/9669)

To learn more about model quantization, [read this documentation](examples/quantize/README.md)
To learn more about model quantization, [read this documentation](tools/quantize/README.md)

## [`llama-cli`](examples/main)
## [`llama-cli`](tools/main)

#### A CLI tool for accessing and experimenting with most of `llama.cpp`'s functionality.

@ -340,7 +362,7 @@ To learn more about model quantization, [read this documentation](examples/quant

</details>

## [`llama-server`](examples/server)
## [`llama-server`](tools/server)

#### A lightweight, [OpenAI API](https://github.com/openai/openai-openapi) compatible, HTTP server for serving LLMs.

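As a quick illustration of what "OpenAI-compatible" means in practice, here is a sketch assuming the server is listening on its default `http://127.0.0.1:8080` address (adjust the host/port if you changed them):

```sh
# start the server (same model as in the Quick start section)
llama-server -hf ggml-org/gemma-3-1b-it-GGUF

# then, from another shell, send an OpenAI-style chat completion request
curl http://127.0.0.1:8080/v1/chat/completions -H "Content-Type: application/json" -d '{
  "messages": [
    { "role": "user", "content": "Hello!" }
  ]
}'
```
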
@ -410,7 +432,7 @@ To learn more about model quantization, [read this documentation](examples/quant

</details>

## [`llama-perplexity`](examples/perplexity)
## [`llama-perplexity`](tools/perplexity)

#### A tool for measuring the perplexity [^1][^2] (and other quality metrics) of a model over a given text.

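As an illustration, a minimal sketch of a typical run (the model path and the evaluation text file are placeholders for your own files; any plain-text corpus such as a wikitext test split can be used):

```sh
llama-perplexity -m my_model.gguf -f wiki.test.raw
```
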
@ -435,10 +457,10 @@ To learn more about model quantization, [read this documentation](examples/quant

</details>

[^1]: [examples/perplexity/README.md](./examples/perplexity/README.md)
[^1]: [tools/perplexity/README.md](./tools/perplexity/README.md)
[^2]: [https://huggingface.co/docs/transformers/perplexity](https://huggingface.co/docs/transformers/perplexity)

## [`llama-bench`](examples/llama-bench)
## [`llama-bench`](tools/llama-bench)

#### Benchmark the performance of the inference for various parameters.

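As an illustration, a minimal sketch (the model path is a placeholder; `-p` and `-n` select the prompt-processing and token-generation sizes to benchmark):

```sh
llama-bench -m my_model.gguf -p 512 -n 128
```
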
@ -459,7 +481,7 @@ To learn more about model quantization, [read this documentation](examples/quant

</details>

## [`llama-run`](examples/run)
## [`llama-run`](tools/run)

#### A comprehensive example for running `llama.cpp` models. Useful for inferencing. Used with RamaLama [^3].

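A rough sketch of an invocation, assuming a local GGUF file addressed via the `file://` prefix and an optional trailing prompt (check `llama-run --help` for the exact syntax supported by your build):

```sh
llama-run file://my_model.gguf "Write a haiku about local inference"
```
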
@ -503,8 +525,8 @@ To learn more about model quantization, [read this documentation](examples/quant

## Other documentation

- [main (cli)](examples/main/README.md)
- [main (cli)](tools/main/README.md)
- [server](examples/server/README.md)
- [server](tools/server/README.md)
- [GBNF grammars](grammars/README.md)

#### Development documentation

@ -570,4 +592,12 @@ automatically. For example:

$ echo "source ~/.llama-completion.bash" >> ~/.bashrc
```

## References
## Dependencies

- [yhirose/cpp-httplib](https://github.com/yhirose/cpp-httplib) - Single-header HTTP server, used by `llama-server` - MIT license
- [stb-image](https://github.com/nothings/stb) - Single-header image format decoder, used by multimodal subsystem - Public domain
- [nlohmann/json](https://github.com/nlohmann/json) - Single-header JSON library, used by various tools/examples - MIT License
- [minja](https://github.com/google/minja) - Minimal Jinja parser in C++, used by various tools/examples - MIT License
- [linenoise.cpp](./tools/run/linenoise.cpp/linenoise.cpp) - C++ library that provides readline-like line editing capabilities, used by `llama-run` - BSD 2-Clause License
- [curl](https://curl.se/) - Client-side URL transfer library, used by various tools/examples - [CURL License](https://curl.se/docs/copyright.html)
- [miniaudio.h](https://github.com/mackron/miniaudio) - Single-header audio format decoder, used by multimodal subsystem - Public domain

@ -40,7 +40,8 @@ To protect sensitive data from potential leaks or unauthorized access, it is cru

### Untrusted environments or networks

If you can't run your models in a secure and isolated environment, or if they must be exposed to an untrusted network, make sure to take the following security precautions:
* Do not use the RPC backend, [rpc-server](https://github.com/ggml-org/llama.cpp/tree/master/tools/rpc) and [llama-server](https://github.com/ggml-org/llama.cpp/tree/master/tools/server) functionality (see https://github.com/ggml-org/llama.cpp/pull/13061).
* Confirm the hash of any downloaded artifact (e.g. pre-trained model weights) matches a known-good value (see the example below).
* Encrypt your data if sending it over the network.
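For the hash check above, a minimal sketch using standard tooling (the file name and `<expected-sha256>` are placeholders; obtain the known-good digest from a source you trust, such as the model publisher):

```sh
echo "<expected-sha256>  my_model.gguf" | sha256sum --check
```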

### Multi-Tenant environments

@ -8,6 +8,7 @@ TVOS_MIN_OS_VERSION=16.4

BUILD_SHARED_LIBS=OFF
LLAMA_BUILD_EXAMPLES=OFF
LLAMA_BUILD_TOOLS=OFF
LLAMA_BUILD_TESTS=OFF
LLAMA_BUILD_SERVER=OFF
GGML_METAL=ON

@ -31,6 +32,7 @@ COMMON_CMAKE_ARGS=(

-DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=ggml
-DBUILD_SHARED_LIBS=${BUILD_SHARED_LIBS}
-DLLAMA_BUILD_EXAMPLES=${LLAMA_BUILD_EXAMPLES}
-DLLAMA_BUILD_TOOLS=${LLAMA_BUILD_TOOLS}
-DLLAMA_BUILD_TESTS=${LLAMA_BUILD_TESTS}
-DLLAMA_BUILD_SERVER=${LLAMA_BUILD_SERVER}
-DGGML_METAL_EMBED_LIBRARY=${GGML_METAL_EMBED_LIBRARY}

@ -115,6 +117,7 @@ setup_framework_structure() {

# Copy all required headers (common for all platforms)
cp include/llama.h ${header_path}
cp ggml/include/ggml.h ${header_path}
cp ggml/include/ggml-opt.h ${header_path}
cp ggml/include/ggml-alloc.h ${header_path}
cp ggml/include/ggml-backend.h ${header_path}
cp ggml/include/ggml-metal.h ${header_path}

@ -54,7 +54,7 @@ docker run --privileged -it \

-v $HOME/llama.cpp/ci-cache:/ci-cache \
-v $HOME/llama.cpp/ci-results:/ci-results \
-v $PWD:/ws -w /ws \
mthreads/musa:rc3.1.1-devel-ubuntu22.04
mthreads/musa:rc4.0.1-mudnn-devel-ubuntu22.04
```

Inside the container, execute the following commands:

ci/run.sh (25 lines changed)

@ -39,14 +39,27 @@ sd=`dirname $0`
cd $sd/../
SRC=`pwd`

CMAKE_EXTRA="-DLLAMA_FATAL_WARNINGS=ON -DLLAMA_CURL=OFF"
CMAKE_EXTRA="-DLLAMA_FATAL_WARNINGS=ON -DLLAMA_CURL=ON"

if [ ! -z ${GG_BUILD_METAL} ]; then
    CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_METAL=ON -DGGML_METAL_USE_BF16=ON"
fi

if [ ! -z ${GG_BUILD_CUDA} ]; then
    CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_CUDA=ON -DCMAKE_CUDA_ARCHITECTURES=native"
    CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_CUDA=ON"

    if command -v nvidia-smi >/dev/null 2>&1; then
        CUDA_ARCH=$(nvidia-smi --query-gpu=compute_cap --format=csv,noheader,nounits 2>/dev/null | head -1 | tr -d '.')
        if [[ -n "$CUDA_ARCH" && "$CUDA_ARCH" =~ ^[0-9]+$ ]]; then
            CMAKE_EXTRA="${CMAKE_EXTRA} -DCMAKE_CUDA_ARCHITECTURES=${CUDA_ARCH}"
        else
            echo "Warning: Using fallback CUDA architectures"
            CMAKE_EXTRA="${CMAKE_EXTRA} -DCMAKE_CUDA_ARCHITECTURES=61;70;75;80;86;89"
        fi
    else
        echo "Error: nvidia-smi not found, cannot build with CUDA"
        exit 1
    fi
fi

if [ ! -z ${GG_BUILD_SYCL} ]; then

@ -187,8 +200,8 @@ function gg_run_test_scripts_debug {

    set -e

    (cd ./examples/gguf-split && time bash tests.sh "$SRC/build-ci-debug/bin" "$MNT/models") 2>&1 | tee -a $OUT/${ci}-scripts.log
    (cd ./tools/gguf-split && time bash tests.sh "$SRC/build-ci-debug/bin" "$MNT/models") 2>&1 | tee -a $OUT/${ci}-scripts.log
    (cd ./examples/quantize && time bash tests.sh "$SRC/build-ci-debug/bin" "$MNT/models") 2>&1 | tee -a $OUT/${ci}-scripts.log
    (cd ./tools/quantize && time bash tests.sh "$SRC/build-ci-debug/bin" "$MNT/models") 2>&1 | tee -a $OUT/${ci}-scripts.log

    set +e
}

@ -211,8 +224,8 @@ function gg_run_test_scripts_release {

    set -e

    (cd ./examples/gguf-split && time bash tests.sh "$SRC/build-ci-release/bin" "$MNT/models") 2>&1 | tee -a $OUT/${ci}-scripts.log
    (cd ./tools/gguf-split && time bash tests.sh "$SRC/build-ci-release/bin" "$MNT/models") 2>&1 | tee -a $OUT/${ci}-scripts.log
    (cd ./examples/quantize && time bash tests.sh "$SRC/build-ci-release/bin" "$MNT/models") 2>&1 | tee -a $OUT/${ci}-scripts.log
    (cd ./tools/quantize && time bash tests.sh "$SRC/build-ci-release/bin" "$MNT/models") 2>&1 | tee -a $OUT/${ci}-scripts.log

    set +e
}

@ -1,6 +0,0 @@

set( CMAKE_SYSTEM_NAME Windows )
set( CMAKE_SYSTEM_PROCESSOR arm64 )

set( target arm64-pc-windows-msvc )
set( CMAKE_C_COMPILER_TARGET ${target} )
set( CMAKE_CXX_COMPILER_TARGET ${target} )
@ -41,14 +41,20 @@ endif()

if(MSVC)
    set(BUILD_COMPILER "${CMAKE_C_COMPILER_ID} ${CMAKE_C_COMPILER_VERSION}")
    set(BUILD_TARGET ${CMAKE_VS_PLATFORM_NAME})
    if (CMAKE_VS_PLATFORM_NAME)
        set(BUILD_TARGET ${CMAKE_VS_PLATFORM_NAME})
    else()
        set(BUILD_TARGET "${CMAKE_SYSTEM_NAME} ${CMAKE_SYSTEM_PROCESSOR}")
    endif()
else()
    execute_process(
        COMMAND sh -c "\"$@\" --version | head -1" _ ${CMAKE_C_COMPILER}
        COMMAND ${CMAKE_C_COMPILER} --version
        OUTPUT_VARIABLE OUT
        OUTPUT_STRIP_TRAILING_WHITESPACE
    )
    string(REGEX REPLACE " *\n.*" "" OUT "${OUT}")
    set(BUILD_COMPILER ${OUT})

    execute_process(
        COMMAND ${CMAKE_C_COMPILER} -dumpmachine
        OUTPUT_VARIABLE OUT
@ -3,9 +3,3 @@ set( CMAKE_SYSTEM_PROCESSOR x86_64 )

set( CMAKE_C_COMPILER clang )
set( CMAKE_CXX_COMPILER clang++ )

set( arch_c_flags "-march=native" )

set( CMAKE_C_FLAGS_INIT "${arch_c_flags}" )
set( CMAKE_CXX_FLAGS_INIT "${arch_c_flags}" )

@ -7,8 +7,8 @@ llama_add_compile_flags()
|
|||||||
# Build info header
|
# Build info header
|
||||||
#
|
#
|
||||||
|
|
||||||
if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/../.git")
|
if(EXISTS "${PROJECT_SOURCE_DIR}/.git")
|
||||||
set(GIT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../.git")
|
set(GIT_DIR "${PROJECT_SOURCE_DIR}/.git")
|
||||||
|
|
||||||
# Is git submodule
|
# Is git submodule
|
||||||
if(NOT IS_DIRECTORY "${GIT_DIR}")
|
if(NOT IS_DIRECTORY "${GIT_DIR}")
|
||||||
@ -18,34 +18,26 @@ if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/../.git")
|
|||||||
if (SLASH_POS EQUAL 0)
|
if (SLASH_POS EQUAL 0)
|
||||||
set(GIT_DIR "${REAL_GIT_DIR}")
|
set(GIT_DIR "${REAL_GIT_DIR}")
|
||||||
else()
|
else()
|
||||||
set(GIT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../${REAL_GIT_DIR}")
|
set(GIT_DIR "${PROJECT_SOURCE_DIR}/${REAL_GIT_DIR}")
|
||||||
endif()
|
endif()
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
if(EXISTS "${GIT_DIR}/index")
|
if(EXISTS "${GIT_DIR}/index")
|
||||||
set(GIT_INDEX "${GIT_DIR}/index")
|
# For build-info.cpp below
|
||||||
|
set_property(DIRECTORY APPEND PROPERTY CMAKE_CONFIGURE_DEPENDS "${GIT_DIR}/index")
|
||||||
else()
|
else()
|
||||||
message(WARNING "Git index not found in git repository.")
|
message(WARNING "Git index not found in git repository.")
|
||||||
set(GIT_INDEX "")
|
|
||||||
endif()
|
endif()
|
||||||
else()
|
else()
|
||||||
message(WARNING "Git repository not found; to enable automatic generation of build info, make sure Git is installed and the project is a Git repository.")
|
message(WARNING "Git repository not found; to enable automatic generation of build info, make sure Git is installed and the project is a Git repository.")
|
||||||
set(GIT_INDEX "")
|
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
# Add a custom command to rebuild build-info.cpp when .git/index changes
|
set(TEMPLATE_FILE "${CMAKE_CURRENT_SOURCE_DIR}/build-info.cpp.in")
|
||||||
add_custom_command(
|
set(OUTPUT_FILE "${CMAKE_CURRENT_BINARY_DIR}/build-info.cpp")
|
||||||
OUTPUT "${CMAKE_CURRENT_SOURCE_DIR}/build-info.cpp"
|
configure_file(${TEMPLATE_FILE} ${OUTPUT_FILE})
|
||||||
COMMENT "Generating build details from Git"
|
|
||||||
COMMAND ${CMAKE_COMMAND} -DMSVC=${MSVC} -DCMAKE_C_COMPILER_VERSION=${CMAKE_C_COMPILER_VERSION}
|
|
||||||
-DCMAKE_C_COMPILER_ID=${CMAKE_C_COMPILER_ID} -DCMAKE_VS_PLATFORM_NAME=${CMAKE_VS_PLATFORM_NAME}
|
|
||||||
-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -P "${CMAKE_CURRENT_SOURCE_DIR}/cmake/build-info-gen-cpp.cmake"
|
|
||||||
WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/.."
|
|
||||||
DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/build-info.cpp.in" ${GIT_INDEX}
|
|
||||||
VERBATIM
|
|
||||||
)
|
|
||||||
set(TARGET build_info)
|
set(TARGET build_info)
|
||||||
add_library(${TARGET} OBJECT build-info.cpp)
|
add_library(${TARGET} OBJECT ${OUTPUT_FILE})
|
||||||
if (BUILD_SHARED_LIBS)
|
if (BUILD_SHARED_LIBS)
|
||||||
set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON)
|
set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON)
|
||||||
endif()
|
endif()
|
||||||
@ -56,21 +48,24 @@ add_library(${TARGET} STATIC
|
|||||||
arg.cpp
|
arg.cpp
|
||||||
arg.h
|
arg.h
|
||||||
base64.hpp
|
base64.hpp
|
||||||
|
chat-parser.cpp
|
||||||
|
chat-parser.h
|
||||||
chat.cpp
|
chat.cpp
|
||||||
chat.h
|
chat.h
|
||||||
common.cpp
|
common.cpp
|
||||||
common.h
|
common.h
|
||||||
console.cpp
|
console.cpp
|
||||||
console.h
|
console.h
|
||||||
|
json-partial.cpp
|
||||||
|
json-partial.h
|
||||||
json-schema-to-grammar.cpp
|
json-schema-to-grammar.cpp
|
||||||
json.hpp
|
|
||||||
llguidance.cpp
|
llguidance.cpp
|
||||||
log.cpp
|
log.cpp
|
||||||
log.h
|
log.h
|
||||||
minja/chat-template.hpp
|
|
||||||
minja/minja.hpp
|
|
||||||
ngram-cache.cpp
|
ngram-cache.cpp
|
||||||
ngram-cache.h
|
ngram-cache.h
|
||||||
|
regex-partial.cpp
|
||||||
|
regex-partial.h
|
||||||
sampling.cpp
|
sampling.cpp
|
||||||
sampling.h
|
sampling.h
|
||||||
speculative.cpp
|
speculative.cpp
|
||||||
@ -117,8 +112,8 @@ if (LLAMA_LLGUIDANCE)
|
|||||||
|
|
||||||
ExternalProject_Add(llguidance_ext
|
ExternalProject_Add(llguidance_ext
|
||||||
GIT_REPOSITORY https://github.com/guidance-ai/llguidance
|
GIT_REPOSITORY https://github.com/guidance-ai/llguidance
|
||||||
# v0.7.10:
|
# v0.7.20 (+ fix to build on GCC 15):
|
||||||
GIT_TAG 0309d2a6bf40abda35344a362edc71e06d5009f8
|
GIT_TAG b5b8b64dba11c4e4ee6b1d1450d3a3ae279891e8
|
||||||
PREFIX ${CMAKE_BINARY_DIR}/llguidance
|
PREFIX ${CMAKE_BINARY_DIR}/llguidance
|
||||||
SOURCE_DIR ${LLGUIDANCE_SRC}
|
SOURCE_DIR ${LLGUIDANCE_SRC}
|
||||||
BUILD_IN_SOURCE TRUE
|
BUILD_IN_SOURCE TRUE
|
||||||
@ -139,6 +134,30 @@ if (LLAMA_LLGUIDANCE)
|
|||||||
set(LLAMA_COMMON_EXTRA_LIBS ${LLAMA_COMMON_EXTRA_LIBS} llguidance ${LLGUIDANCE_PLATFORM_LIBS})
|
set(LLAMA_COMMON_EXTRA_LIBS ${LLAMA_COMMON_EXTRA_LIBS} llguidance ${LLGUIDANCE_PLATFORM_LIBS})
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
target_include_directories(${TARGET} PUBLIC .)
|
target_include_directories(${TARGET} PUBLIC . ../vendor)
|
||||||
target_compile_features (${TARGET} PUBLIC cxx_std_17)
|
target_compile_features (${TARGET} PUBLIC cxx_std_17)
|
||||||
target_link_libraries (${TARGET} PRIVATE ${LLAMA_COMMON_EXTRA_LIBS} PUBLIC llama Threads::Threads)
|
target_link_libraries (${TARGET} PRIVATE ${LLAMA_COMMON_EXTRA_LIBS} PUBLIC llama Threads::Threads)
|
||||||
|
|
||||||
|
|
||||||
|
#
|
||||||
|
# copy the license files
|
||||||
|
#
|
||||||
|
|
||||||
|
# Check if running in GitHub Actions
|
||||||
|
if (DEFINED ENV{GITHUB_ACTIONS} AND "$ENV{GITHUB_ACTIONS}" STREQUAL "true")
|
||||||
|
message(STATUS "Running inside GitHub Actions - copying license files")
|
||||||
|
|
||||||
|
# Copy all files from licenses/ to build/bin/
|
||||||
|
file(GLOB LICENSE_FILES "${CMAKE_SOURCE_DIR}/licenses/*")
|
||||||
|
foreach(LICENSE_FILE ${LICENSE_FILES})
|
||||||
|
get_filename_component(FILENAME ${LICENSE_FILE} NAME)
|
||||||
|
add_custom_command(
|
||||||
|
POST_BUILD
|
||||||
|
TARGET ${TARGET}
|
||||||
|
COMMAND ${CMAKE_COMMAND} -E copy_if_different
|
||||||
|
"${LICENSE_FILE}"
|
||||||
|
"$<TARGET_FILE_DIR:llama>/${FILENAME}"
|
||||||
|
COMMENT "Copying ${FILENAME} to ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}")
|
||||||
|
message(STATUS "Copying ${LICENSE_FILE} to ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${FILENAME}")
|
||||||
|
endforeach()
|
||||||
|
endif()
|
||||||
|
628
common/arg.cpp
628
common/arg.cpp
File diff suppressed because it is too large
Load Diff
@ -78,3 +78,12 @@ bool common_params_parse(int argc, char ** argv, common_params & params, llama_e
|
|||||||
|
|
||||||
// function to be used by test-arg-parser
|
// function to be used by test-arg-parser
|
||||||
common_params_context common_params_parser_init(common_params & params, llama_example ex, void(*print_usage)(int, char **) = nullptr);
|
common_params_context common_params_parser_init(common_params & params, llama_example ex, void(*print_usage)(int, char **) = nullptr);
|
||||||
|
bool common_has_curl();
|
||||||
|
|
||||||
|
struct common_remote_params {
|
||||||
|
std::vector<std::string> headers;
|
||||||
|
long timeout = 0; // CURLOPT_TIMEOUT, in seconds ; 0 means no timeout
|
||||||
|
long max_size = 0; // max size of the response ; unlimited if 0 ; max is 2GB
|
||||||
|
};
|
||||||
|
// get remote file content, returns <http_code, raw_response_body>
|
||||||
|
std::pair<long, std::vector<char>> common_remote_get_content(const std::string & url, const common_remote_params & params);
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
int LLAMA_BUILD_NUMBER = @BUILD_NUMBER@;
|
int LLAMA_BUILD_NUMBER = @LLAMA_BUILD_NUMBER@;
|
||||||
char const *LLAMA_COMMIT = "@BUILD_COMMIT@";
|
char const *LLAMA_COMMIT = "@LLAMA_BUILD_COMMIT@";
|
||||||
char const *LLAMA_COMPILER = "@BUILD_COMPILER@";
|
char const *LLAMA_COMPILER = "@BUILD_COMPILER@";
|
||||||
char const *LLAMA_BUILD_TARGET = "@BUILD_TARGET@";
|
char const *LLAMA_BUILD_TARGET = "@BUILD_TARGET@";
|
||||||
|
385
common/chat-parser.cpp
Normal file
385
common/chat-parser.cpp
Normal file
@ -0,0 +1,385 @@
|
|||||||
|
#include "chat-parser.h"
|
||||||
|
#include "common.h"
|
||||||
|
#include "log.h"
|
||||||
|
#include "regex-partial.h"
|
||||||
|
|
||||||
|
#include <optional>
|
||||||
|
#include <stdexcept>
|
||||||
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
using json = nlohmann::ordered_json;
|
||||||
|
|
||||||
|
common_chat_msg_parser::common_chat_msg_parser(const std::string & input, bool is_partial, const common_chat_syntax & syntax)
|
||||||
|
: input_(input), is_partial_(is_partial), syntax_(syntax)
|
||||||
|
{
|
||||||
|
result_.role = "assistant";
|
||||||
|
|
||||||
|
while (true) {
|
||||||
|
std::string id = std::to_string(std::rand());
|
||||||
|
if (input.find(id) == std::string::npos) {
|
||||||
|
healing_marker_ = id;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
std::string common_chat_msg_parser::str(const common_string_range & rng) const {
|
||||||
|
GGML_ASSERT(rng.begin <= rng.end);
|
||||||
|
return input_.substr(rng.begin, rng.end - rng.begin);
|
||||||
|
}
|
||||||
|
|
||||||
|
void common_chat_msg_parser::add_content(const std::string &content) {
|
||||||
|
result_.content += content;
|
||||||
|
}
|
||||||
|
|
||||||
|
void common_chat_msg_parser::add_reasoning_content(const std::string &reasoning_content) {
|
||||||
|
result_.reasoning_content += reasoning_content;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool common_chat_msg_parser::add_tool_call(const std::string & name, const std::string & id, const std::string & arguments) {
|
||||||
|
if (name.empty()) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
common_chat_tool_call tool_call;
|
||||||
|
tool_call.name = name;
|
||||||
|
tool_call.arguments = arguments;
|
||||||
|
tool_call.id = id;
|
||||||
|
|
||||||
|
// LOG_DBG("Tool call arguments:\n\traw: %s\n\tresult: %s\n", arguments.c_str(), tool_call.arguments.c_str());
|
||||||
|
result_.tool_calls.emplace_back(tool_call);
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
bool common_chat_msg_parser::add_tool_call(const json & tool_call) {
|
||||||
|
std::string name = tool_call.contains("name") ? tool_call.at("name") : "";
|
||||||
|
std::string id = tool_call.contains("id") ? tool_call.at("id") : "";
|
||||||
|
std::string arguments = tool_call.contains("arguments") ? tool_call.at("arguments") : "";
|
||||||
|
return add_tool_call(name, id, arguments);
|
||||||
|
}
|
||||||
|
|
||||||
|
bool common_chat_msg_parser::add_tool_calls(const json & arr) {
|
||||||
|
for (const auto & item : arr) {
|
||||||
|
if (!add_tool_call(item)) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
void common_chat_msg_parser::finish() {
|
||||||
|
if (!is_partial_ && pos_ != input_.size()) {
|
||||||
|
throw std::runtime_error("Unexpected content at end of input");// + input_.substr(pos_));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
bool common_chat_msg_parser::consume_spaces() {
|
||||||
|
const auto length = input_.size();
|
||||||
|
auto consumed = false;
|
||||||
|
while (pos_ < length && std::isspace(input_[pos_])) {
|
||||||
|
++pos_;
|
||||||
|
consumed = true;
|
||||||
|
}
|
||||||
|
return consumed;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool common_chat_msg_parser::try_consume_literal(const std::string & literal) {
|
||||||
|
auto pos = pos_;
|
||||||
|
for (auto i = 0u; i < literal.size(); ++i) {
|
||||||
|
if (pos >= input_.size()) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
if (input_[pos] != literal[i]) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
++pos;
|
||||||
|
}
|
||||||
|
pos_ = pos;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
std::optional<common_chat_msg_parser::find_regex_result> common_chat_msg_parser::try_find_literal(const std::string & literal) {
|
||||||
|
auto idx = input_.find(literal, pos_);
|
||||||
|
if (idx != std::string::npos) {
|
||||||
|
find_regex_result res;
|
||||||
|
res.prelude = input_.substr(pos_, idx - pos_);
|
||||||
|
auto end = idx + literal.size();
|
||||||
|
res.groups.emplace_back(common_string_range{idx, end});
|
||||||
|
move_to(end);
|
||||||
|
return res;
|
||||||
|
}
|
||||||
|
if (is_partial_) {
|
||||||
|
idx = string_find_partial_stop(input_, literal);
|
||||||
|
if (idx != std::string::npos && idx >= pos_) {
|
||||||
|
find_regex_result res;
|
||||||
|
res.prelude = input_.substr(pos_, idx - pos_);
|
||||||
|
auto end = input_.size();
|
||||||
|
res.groups.emplace_back(common_string_range{idx, end});
|
||||||
|
move_to(end);
|
||||||
|
return res;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return std::nullopt;
|
||||||
|
}
|
||||||
|
|
||||||
|
void common_chat_msg_parser::consume_literal(const std::string & literal) {
|
||||||
|
if (!try_consume_literal(literal)) {
|
||||||
|
throw common_chat_msg_partial_exception(literal);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
bool common_chat_msg_parser::try_parse_reasoning(const std::string & start_think, const std::string & end_think) {
|
||||||
|
auto handle_reasoning = [&](const std::string & reasoning, bool closed) {
|
||||||
|
auto stripped_reasoning = string_strip(reasoning);
|
||||||
|
if (stripped_reasoning.empty()) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
if (syntax_.reasoning_in_content) {
|
||||||
|
add_content(syntax_.reasoning_format == COMMON_REASONING_FORMAT_DEEPSEEK ? "<think>" : start_think);
|
||||||
|
add_content(stripped_reasoning);
|
||||||
|
if (closed) {
|
||||||
|
add_content(syntax_.reasoning_format == COMMON_REASONING_FORMAT_DEEPSEEK ? "</think>" : end_think);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
add_reasoning_content(stripped_reasoning);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
if (syntax_.reasoning_format != COMMON_REASONING_FORMAT_NONE) {
|
||||||
|
if (syntax_.thinking_forced_open || try_consume_literal(start_think)) {
|
||||||
|
if (auto res = try_find_literal(end_think)) {
|
||||||
|
handle_reasoning(res->prelude, /* closed */ true);
|
||||||
|
consume_spaces();
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
auto rest = consume_rest();
|
||||||
|
if (!rest.empty()) {
|
||||||
|
handle_reasoning(rest, /* closed */ !is_partial());
|
||||||
|
}
|
||||||
|
// Allow unclosed thinking tags, for now (https://github.com/ggml-org/llama.cpp/issues/13812, https://github.com/ggml-org/llama.cpp/issues/13877)
|
||||||
|
// if (!syntax_.thinking_forced_open) {
|
||||||
|
// throw common_chat_msg_partial_exception(end_think);
|
||||||
|
// }
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
std::string common_chat_msg_parser::consume_rest() {
|
||||||
|
auto rest = input_.substr(pos_);
|
||||||
|
pos_ = input_.size();
|
||||||
|
return rest;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tries to find the regex, consumes it (pos right after it) and gives the prelude (right before it) and the groups to the callback.
|
||||||
|
std::optional<common_chat_msg_parser::find_regex_result> common_chat_msg_parser::try_find_regex(const common_regex & regex, size_t from, bool add_prelude_to_content) {
|
||||||
|
auto m = regex.search(input_, from == std::string::npos ? pos_ : from);
|
||||||
|
if (m.type == COMMON_REGEX_MATCH_TYPE_NONE) {
|
||||||
|
return std::nullopt;
|
||||||
|
}
|
||||||
|
auto prelude = input_.substr(pos_, m.groups[0].begin - pos_);
|
||||||
|
pos_ = m.groups[0].end;
|
||||||
|
|
||||||
|
if (add_prelude_to_content) {
|
||||||
|
add_content(prelude);
|
||||||
|
}
|
||||||
|
if (m.type == COMMON_REGEX_MATCH_TYPE_PARTIAL) {
|
||||||
|
if (is_partial()) {
|
||||||
|
throw common_chat_msg_partial_exception(regex.str());
|
||||||
|
}
|
||||||
|
return std::nullopt;
|
||||||
|
}
|
||||||
|
return find_regex_result{prelude, m.groups};
|
||||||
|
}
|
||||||
|
|
||||||
|
common_chat_msg_parser::find_regex_result common_chat_msg_parser::consume_regex(const common_regex & regex) {
|
||||||
|
if (auto result = try_consume_regex(regex)) {
|
||||||
|
return *result;
|
||||||
|
}
|
||||||
|
throw common_chat_msg_partial_exception(regex.str());
|
||||||
|
}
|
||||||
|
|
||||||
|
std::optional<common_chat_msg_parser::find_regex_result> common_chat_msg_parser::try_consume_regex(const common_regex & regex) {
|
||||||
|
auto m = regex.search(input_, pos_);
|
||||||
|
if (m.type == COMMON_REGEX_MATCH_TYPE_NONE) {
|
||||||
|
return std::nullopt;
|
||||||
|
}
|
||||||
|
if (m.type == COMMON_REGEX_MATCH_TYPE_PARTIAL) {
|
||||||
|
if (is_partial()) {
|
||||||
|
throw common_chat_msg_partial_exception(regex.str());
|
||||||
|
}
|
||||||
|
return std::nullopt;
|
||||||
|
}
|
||||||
|
if (m.groups[0].begin != pos_) {
|
||||||
|
// Didn't match at the current position.
|
||||||
|
return std::nullopt;
|
||||||
|
}
|
||||||
|
pos_ = m.groups[0].end;
|
||||||
|
|
||||||
|
return find_regex_result {
|
||||||
|
/* .prelude = */ "",
|
||||||
|
m.groups,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
std::optional<common_json> common_chat_msg_parser::try_consume_json() {
|
||||||
|
auto it = input_.cbegin() + pos_;
|
||||||
|
const auto end = input_.cend();
|
||||||
|
common_json result;
|
||||||
|
if (!common_json_parse(it, end, healing_marker_, result)) {
|
||||||
|
return std::nullopt;
|
||||||
|
}
|
||||||
|
pos_ = std::distance(input_.cbegin(), it);
|
||||||
|
if (result.healing_marker.marker.empty()) {
|
||||||
|
// No healing marker, just return the parsed json
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
if (!is_partial()) {
|
||||||
|
throw common_chat_msg_partial_exception("JSON");
|
||||||
|
}
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
common_json common_chat_msg_parser::consume_json() {
|
||||||
|
if (auto result = try_consume_json()) {
|
||||||
|
return *result;
|
||||||
|
}
|
||||||
|
throw common_chat_msg_partial_exception("JSON");
|
||||||
|
}
|
||||||
|
|
||||||
|
common_chat_msg_parser::consume_json_result common_chat_msg_parser::consume_json_with_dumped_args(
|
||||||
|
const std::vector<std::vector<std::string>> & args_paths,
|
||||||
|
const std::vector<std::vector<std::string>> & content_paths
|
||||||
|
) {
|
||||||
|
if (auto result = try_consume_json_with_dumped_args(args_paths, content_paths)) {
|
||||||
|
return *result;
|
||||||
|
}
|
||||||
|
throw common_chat_msg_partial_exception("JSON");
|
||||||
|
}
|
||||||
|
|
||||||
|
std::optional<common_chat_msg_parser::consume_json_result> common_chat_msg_parser::try_consume_json_with_dumped_args(
|
||||||
|
const std::vector<std::vector<std::string>> & args_paths,
|
||||||
|
const std::vector<std::vector<std::string>> & content_paths
|
||||||
|
) {
|
||||||
|
auto partial = try_consume_json();
|
||||||
|
if (!partial) {
|
||||||
|
return std::nullopt;
|
||||||
|
}
|
||||||
|
auto is_arguments_path = [&](const std::vector<std::string> & path) {
|
||||||
|
return std::find(args_paths.begin(), args_paths.end(), path) != args_paths.end();
|
||||||
|
};
|
||||||
|
auto is_content_path = [&](const std::vector<std::string> & path) {
|
||||||
|
return std::find(content_paths.begin(), content_paths.end(), path) != content_paths.end();
|
||||||
|
};
|
||||||
|
|
||||||
|
if (partial->healing_marker.marker.empty()) {
|
||||||
|
if (args_paths.empty()) {
|
||||||
|
// No arguments to dump, and JSON was parsed fully.
|
||||||
|
return consume_json_result {
|
||||||
|
partial->json,
|
||||||
|
/* .is_partial = */ false,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
if (is_arguments_path({})) {
|
||||||
|
// Entire JSON is the arguments and was parsed fully.
|
||||||
|
return consume_json_result {
|
||||||
|
partial->json.dump(),
|
||||||
|
/* .is_partial = */ false,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG_DBG("Parsed partial JSON: %s (json_healing_marker: %s)\n", partial->json.dump().c_str(), partial->healing_marker.json_dump_marker.c_str());
|
||||||
|
|
||||||
|
auto found_healing_marker = false;
|
||||||
|
std::vector<std::string> path;
|
||||||
|
std::function<json(const json &)> remove_unsupported_healings_and_dump_args = [&](const json & j) -> json {
|
||||||
|
if (is_arguments_path(path)) {
|
||||||
|
auto arguments = j.dump();
|
||||||
|
if (is_partial() && !partial->healing_marker.marker.empty()) {
|
||||||
|
auto idx = arguments.find(partial->healing_marker.json_dump_marker);
|
||||||
|
if (idx != std::string::npos) {
|
||||||
|
arguments.resize(idx);
|
||||||
|
found_healing_marker = true;
|
||||||
|
}
|
||||||
|
if (arguments == "\"") {
|
||||||
|
// This happens because of completing `:"$magic` after `"arguments"`
|
||||||
|
arguments = "";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return arguments;
|
||||||
|
}
|
||||||
|
if (is_content_path(path)) {
|
||||||
|
if (!j.is_string()) {
|
||||||
|
throw std::runtime_error("Content path must be a string");
|
||||||
|
}
|
||||||
|
std::string str = j;
|
||||||
|
auto idx = str.find(partial->healing_marker.marker); // not using json_dump_marker as we're inside a string
|
||||||
|
if (idx != std::string::npos) {
|
||||||
|
str.resize(idx);
|
||||||
|
found_healing_marker = true;
|
||||||
|
}
|
||||||
|
return str;
|
||||||
|
}
|
||||||
|
if (j.is_object()) {
|
||||||
|
auto obj = json::object();
|
||||||
|
for (const auto & p : j.items()) {
|
||||||
|
const auto & key = p.key();
|
||||||
|
const auto & value = p.value();
|
||||||
|
const std::string key_str = key; // NOLINT
|
||||||
|
auto idx = key_str.find(healing_marker_);
|
||||||
|
if (idx != std::string::npos) {
|
||||||
|
found_healing_marker = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
path.push_back(key_str);
|
||||||
|
if (value.is_string()) {
|
||||||
|
const std::string value_str = value;
|
||||||
|
if (value_str.find(healing_marker_) != std::string::npos) {
|
||||||
|
found_healing_marker = true;
|
||||||
|
if (is_content_path(path)) {
|
||||||
|
if (partial->healing_marker.marker == partial->healing_marker.json_dump_marker) {
|
||||||
|
// The healing occurred inside the string: good. Otherwise we just ditch the entire key/value pair.
|
||||||
|
obj[key] = remove_unsupported_healings_and_dump_args(value);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
obj[key] = value;
|
||||||
|
} else {
|
||||||
|
obj[key] = remove_unsupported_healings_and_dump_args(value);
|
||||||
|
}
|
||||||
|
path.pop_back();
|
||||||
|
}
|
||||||
|
return obj;
|
||||||
|
}
|
||||||
|
if (j.is_array()) {
|
||||||
|
auto arr = json::array();
|
||||||
|
for (const auto & value : j) {
|
||||||
|
if (value.is_string()) {
|
||||||
|
std::string str = value;
|
||||||
|
auto idx = str.find(healing_marker_);
|
||||||
|
if (idx != std::string::npos) {
|
||||||
|
// Don't heal array values that aren't in the arguments.
|
||||||
|
found_healing_marker = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
arr.push_back(remove_unsupported_healings_and_dump_args(value));
|
||||||
|
}
|
||||||
|
return arr;
|
||||||
|
}
|
||||||
|
return j;
|
||||||
|
};
|
||||||
|
|
||||||
|
auto cleaned = remove_unsupported_healings_and_dump_args(partial->json);
|
||||||
|
LOG_DBG("Cleaned up JSON %s to %s (json_healing_marker : '%s')\n", partial->json.dump().c_str(), cleaned.dump().c_str(), partial->healing_marker.json_dump_marker.c_str());
|
||||||
|
return consume_json_result {
|
||||||
|
cleaned,
|
||||||
|
/* .is_partial = */ found_healing_marker,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
void common_chat_msg_parser::clear_tools() {
|
||||||
|
result_.tool_calls.clear();
|
||||||
|
}
|
120
common/chat-parser.h
Normal file
120
common/chat-parser.h
Normal file
@ -0,0 +1,120 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include "chat.h"
|
||||||
|
#include "json-partial.h"
|
||||||
|
#include "regex-partial.h"
|
||||||
|
|
||||||
|
#include <nlohmann/json.hpp>
|
||||||
|
|
||||||
|
#include <optional>
|
||||||
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
class common_chat_msg_partial_exception : public std::runtime_error {
|
||||||
|
public:
|
||||||
|
common_chat_msg_partial_exception(const std::string & message) : std::runtime_error(message) {}
|
||||||
|
};
|
||||||
|
|
||||||
|
class common_chat_msg_parser {
|
||||||
|
std::string input_;
|
||||||
|
bool is_partial_;
|
||||||
|
common_chat_syntax syntax_;
|
||||||
|
std::string healing_marker_;
|
||||||
|
|
||||||
|
size_t pos_ = 0;
|
||||||
|
common_chat_msg result_;
|
||||||
|
|
||||||
|
public:
|
||||||
|
common_chat_msg_parser(const std::string & input, bool is_partial, const common_chat_syntax & syntax);
|
||||||
|
const std::string & input() const { return input_; }
|
||||||
|
size_t pos() const { return pos_; }
|
||||||
|
const std::string & healing_marker() const { return healing_marker_; }
|
||||||
|
const bool & is_partial() const { return is_partial_; }
|
||||||
|
const common_chat_msg & result() const { return result_; }
|
||||||
|
const common_chat_syntax & syntax() const { return syntax_; }
|
||||||
|
|
||||||
|
void move_to(size_t pos) {
|
||||||
|
if (pos > input_.size()) {
|
||||||
|
throw std::runtime_error("Invalid position!");
|
||||||
|
}
|
||||||
|
pos_ = pos;
|
||||||
|
}
|
||||||
|
void move_back(size_t n) {
|
||||||
|
if (pos_ < n) {
|
||||||
|
throw std::runtime_error("Can't move back that far!");
|
||||||
|
}
|
||||||
|
pos_ -= n;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the substring of the input at the given range
|
||||||
|
std::string str(const common_string_range & rng) const;
|
||||||
|
|
||||||
|
// Appends to the result.content field
|
||||||
|
void add_content(const std::string & content);
|
||||||
|
|
||||||
|
// Appends to the result.reasoning_content field
|
||||||
|
void add_reasoning_content(const std::string & reasoning_content);
|
||||||
|
|
||||||
|
// Adds a tool call to the result. If the tool call is too incomplete (e.g. name empty), it won't add anything.
|
||||||
|
bool add_tool_call(const std::string & name, const std::string & id, const std::string & arguments);
|
||||||
|
|
||||||
|
// Adds a tool call using the "name", "id" and "arguments" fields of the json object
|
||||||
|
bool add_tool_call(const nlohmann::ordered_json & tool_call);
|
||||||
|
|
||||||
|
// Adds an array of tool calls using their "name", "id" and "arguments" fields.
|
||||||
|
bool add_tool_calls(const nlohmann::ordered_json & arr);
|
||||||
|
|
||||||
|
void finish();
|
||||||
|
|
||||||
|
bool consume_spaces();
|
||||||
|
|
||||||
|
void consume_literal(const std::string & literal);
|
||||||
|
|
||||||
|
bool try_parse_reasoning(const std::string & start_think, const std::string & end_think);
|
||||||
|
|
||||||
|
std::string consume_rest();
|
||||||
|
|
||||||
|
struct find_regex_result {
|
||||||
|
std::string prelude;
|
||||||
|
std::vector<common_string_range> groups;
|
||||||
|
};
|
||||||
|
|
||||||
|
std::optional<find_regex_result> try_find_regex(const common_regex & regex, size_t from = std::string::npos, bool add_prelude_to_content = true);
|
||||||
|
|
||||||
|
bool try_consume_literal(const std::string & literal);
|
||||||
|
|
||||||
|
std::optional<find_regex_result> try_find_literal(const std::string & literal);
|
||||||
|
|
||||||
|
find_regex_result consume_regex(const common_regex & regex);
|
||||||
|
|
||||||
|
std::optional<find_regex_result> try_consume_regex(const common_regex & regex);
|
||||||
|
|
||||||
|
std::optional<common_json> try_consume_json();
|
||||||
|
common_json consume_json();
|
||||||
|
|
||||||
|
struct consume_json_result {
|
||||||
|
nlohmann::ordered_json value;
|
||||||
|
bool is_partial;
|
||||||
|
};
|
||||||
|
|
||||||
|
/*
|
||||||
|
Consume (possibly partial) json and converts specific subtrees to (possibly truncated) JSON strings.
|
||||||
|
|
||||||
|
By default, object keys can't be truncated, nor can string values (their corresponding key is removed,
|
||||||
|
e.g. `{"foo": "bar", "baz": "b` -> `{"foo": "bar"}`
|
||||||
|
|
||||||
|
But one can allow subpaths to be kept truncated, and possibly json-dumped to truncated json strings
|
||||||
|
- with `content_paths={{"foo"}}` -> `{"foo": "b` -> {"foo": "b"}`
|
||||||
|
- with `args_paths={{"foo"}}` -> `{"foo": {"b` -> `{"foo": "{b"}`
|
||||||
|
*/
|
||||||
|
consume_json_result consume_json_with_dumped_args(
|
||||||
|
const std::vector<std::vector<std::string>> & args_paths = {},
|
||||||
|
const std::vector<std::vector<std::string>> & content_paths = {}
|
||||||
|
);
|
||||||
|
std::optional<consume_json_result> try_consume_json_with_dumped_args(
|
||||||
|
const std::vector<std::vector<std::string>> & args_paths = {},
|
||||||
|
const std::vector<std::vector<std::string>> & content_paths = {}
|
||||||
|
);
|
||||||
|
|
||||||
|
void clear_tools();
|
||||||
|
};
|
1603
common/chat.cpp
1603
common/chat.cpp
File diff suppressed because it is too large
Load Diff
@ -3,6 +3,8 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#include "common.h"
|
#include "common.h"
|
||||||
|
#include <functional>
|
||||||
|
#include <chrono>
|
||||||
#include <string>
|
#include <string>
|
||||||
#include <vector>
|
#include <vector>
|
||||||
|
|
||||||
@ -12,11 +14,19 @@ struct common_chat_tool_call {
|
|||||||
std::string name;
|
std::string name;
|
||||||
std::string arguments;
|
std::string arguments;
|
||||||
std::string id;
|
std::string id;
|
||||||
|
|
||||||
|
bool operator==(const common_chat_tool_call & other) const {
|
||||||
|
return name == other.name && arguments == other.arguments && id == other.id;
|
||||||
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
struct common_chat_msg_content_part {
|
struct common_chat_msg_content_part {
|
||||||
std::string type;
|
std::string type;
|
||||||
std::string text;
|
std::string text;
|
||||||
|
|
||||||
|
bool operator==(const common_chat_msg_content_part & other) const {
|
||||||
|
return type == other.type && text == other.text;
|
||||||
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
struct common_chat_msg {
|
struct common_chat_msg {
|
||||||
@ -27,6 +37,51 @@ struct common_chat_msg {
|
|||||||
std::string reasoning_content;
|
std::string reasoning_content;
|
||||||
std::string tool_name;
|
std::string tool_name;
|
||||||
std::string tool_call_id;
|
std::string tool_call_id;
|
||||||
|
|
||||||
|
template <class T> T to_json_oaicompat() const;
|
||||||
|
|
||||||
|
bool empty() const {
|
||||||
|
return content.empty() && content_parts.empty() && tool_calls.empty() && reasoning_content.empty() && tool_name.empty() && tool_call_id.empty();
|
||||||
|
}
|
||||||
|
void ensure_tool_call_ids_set(std::vector<std::string> & ids_cache, const std::function<std::string()> & gen_tool_call_id) {
|
||||||
|
for (auto i = 0u; i < tool_calls.size(); i++) {
|
||||||
|
if (ids_cache.size() <= i) {
|
||||||
|
auto id = tool_calls[i].id;
|
||||||
|
if (id.empty()) {
|
||||||
|
id = gen_tool_call_id();
|
||||||
|
}
|
||||||
|
ids_cache.push_back(id);
|
||||||
|
}
|
||||||
|
tool_calls[i].id = ids_cache[i];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
bool operator==(const common_chat_msg & other) const {
|
||||||
|
return role == other.role
|
||||||
|
&& content == other.content
|
||||||
|
&& content_parts == other.content_parts
|
||||||
|
&& tool_calls == other.tool_calls
|
||||||
|
&& reasoning_content == other.reasoning_content
|
||||||
|
&& tool_name == other.tool_name
|
||||||
|
&& tool_call_id == other.tool_call_id;
|
||||||
|
}
|
||||||
|
bool operator!=(const common_chat_msg & other) const {
|
||||||
|
return !(*this == other);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
struct common_chat_msg_diff {
|
||||||
|
std::string reasoning_content_delta;
|
||||||
|
std::string content_delta;
|
||||||
|
size_t tool_call_index = std::string::npos;
|
||||||
|
common_chat_tool_call tool_call_delta;
|
||||||
|
|
||||||
|
static std::vector<common_chat_msg_diff> compute_diffs(const common_chat_msg & previous_msg, const common_chat_msg & new_msg);
|
||||||
|
|
||||||
|
bool operator==(const common_chat_msg_diff & other) const {
|
||||||
|
return content_delta == other.content_delta
|
||||||
|
&& tool_call_index == other.tool_call_index
|
||||||
|
&& tool_call_delta == other.tool_call_delta;
|
||||||
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
struct common_chat_tool {
|
struct common_chat_tool {
|
||||||
@ -48,14 +103,11 @@ enum common_chat_format {
|
|||||||
COMMON_CHAT_FORMAT_LLAMA_3_X,
|
COMMON_CHAT_FORMAT_LLAMA_3_X,
|
||||||
COMMON_CHAT_FORMAT_LLAMA_3_X_WITH_BUILTIN_TOOLS,
|
COMMON_CHAT_FORMAT_LLAMA_3_X_WITH_BUILTIN_TOOLS,
|
||||||
COMMON_CHAT_FORMAT_DEEPSEEK_R1,
|
COMMON_CHAT_FORMAT_DEEPSEEK_R1,
|
||||||
COMMON_CHAT_FORMAT_DEEPSEEK_R1_EXTRACT_REASONING,
|
|
||||||
COMMON_CHAT_FORMAT_FIREFUNCTION_V2,
|
COMMON_CHAT_FORMAT_FIREFUNCTION_V2,
|
||||||
COMMON_CHAT_FORMAT_FUNCTIONARY_V3_2,
|
COMMON_CHAT_FORMAT_FUNCTIONARY_V3_2,
|
||||||
COMMON_CHAT_FORMAT_FUNCTIONARY_V3_1_LLAMA_3_1,
|
COMMON_CHAT_FORMAT_FUNCTIONARY_V3_1_LLAMA_3_1,
|
||||||
COMMON_CHAT_FORMAT_HERMES_2_PRO,
|
COMMON_CHAT_FORMAT_HERMES_2_PRO,
|
||||||
COMMON_CHAT_FORMAT_HERMES_2_PRO_EXTRACT_REASONING,
|
|
||||||
COMMON_CHAT_FORMAT_COMMAND_R7B,
|
COMMON_CHAT_FORMAT_COMMAND_R7B,
|
||||||
COMMON_CHAT_FORMAT_COMMAND_R7B_EXTRACT_REASONING,
|
|
||||||
|
|
||||||
COMMON_CHAT_FORMAT_COUNT, // Not a format, just the # formats
|
COMMON_CHAT_FORMAT_COUNT, // Not a format, just the # formats
|
||||||
};
|
};
|
||||||
@ -70,7 +122,9 @@ struct common_chat_templates_inputs {
|
|||||||
std::vector<common_chat_tool> tools;
|
std::vector<common_chat_tool> tools;
|
||||||
common_chat_tool_choice tool_choice = COMMON_CHAT_TOOL_CHOICE_AUTO;
|
common_chat_tool_choice tool_choice = COMMON_CHAT_TOOL_CHOICE_AUTO;
|
||||||
bool parallel_tool_calls = false;
|
bool parallel_tool_calls = false;
|
||||||
bool extract_reasoning = true;
|
common_reasoning_format reasoning_format = COMMON_REASONING_FORMAT_NONE;
|
||||||
|
bool enable_thinking = true;
|
||||||
|
std::chrono::system_clock::time_point now = std::chrono::system_clock::now();
|
||||||
};
|
};
|
||||||
|
|
||||||
struct common_chat_params {
|
struct common_chat_params {
|
||||||
@ -78,11 +132,21 @@ struct common_chat_params {
|
|||||||
std::string prompt;
|
std::string prompt;
|
||||||
std::string grammar;
|
std::string grammar;
|
||||||
bool grammar_lazy = false;
|
bool grammar_lazy = false;
|
||||||
|
bool thinking_forced_open = false;
|
||||||
std::vector<common_grammar_trigger> grammar_triggers;
|
std::vector<common_grammar_trigger> grammar_triggers;
|
||||||
std::vector<std::string> preserved_tokens;
|
std::vector<std::string> preserved_tokens;
|
||||||
std::vector<std::string> additional_stops;
|
std::vector<std::string> additional_stops;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
struct common_chat_syntax {
|
||||||
|
common_chat_format format = COMMON_CHAT_FORMAT_CONTENT_ONLY;
|
||||||
|
common_reasoning_format reasoning_format = COMMON_REASONING_FORMAT_NONE;
|
||||||
|
// Whether reasoning_content should be inlined in the content (e.g. for reasoning_format=deepseek in stream mode)
|
||||||
|
bool reasoning_in_content = false;
|
||||||
|
bool thinking_forced_open = false;
|
||||||
|
bool parse_tool_calls = true;
|
||||||
|
};
|
||||||
|
|
||||||
// Check if the template supplied via "--chat-template" is supported or not. Returns true if it's valid
|
// Check if the template supplied via "--chat-template" is supported or not. Returns true if it's valid
|
||||||
bool common_chat_verify_template(const std::string & tmpl, bool use_jinja);
|
bool common_chat_verify_template(const std::string & tmpl, bool use_jinja);
|
||||||
|
|
||||||
@ -119,8 +183,9 @@ std::string common_chat_format_example(
|
|||||||
const struct common_chat_templates * tmpls,
|
const struct common_chat_templates * tmpls,
|
||||||
bool use_jinja);
|
bool use_jinja);
|
||||||
|
|
||||||
std::string common_chat_format_name(common_chat_format format);
|
const char* common_chat_format_name(common_chat_format format);
|
||||||
common_chat_msg common_chat_parse( const std::string & input, common_chat_format format);
|
const char* common_reasoning_format_name(common_reasoning_format format);
|
||||||
|
common_chat_msg common_chat_parse(const std::string & input, bool is_partial, const common_chat_syntax & syntax);
|
||||||
|
|
||||||
common_chat_tool_choice common_chat_tool_choice_parse_oaicompat(const std::string & tool_choice);
|
common_chat_tool_choice common_chat_tool_choice_parse_oaicompat(const std::string & tool_choice);
|
||||||
|
|
||||||
@ -133,3 +198,5 @@ template <class T> T common_chat_msgs_to_json_oaicompat(const std::vector<common
|
|||||||
// T can be std::string containing JSON or nlohmann::ordered_json
|
// T can be std::string containing JSON or nlohmann::ordered_json
|
||||||
template <class T> std::vector<common_chat_tool> common_chat_tools_parse_oaicompat(const T & tools);
|
template <class T> std::vector<common_chat_tool> common_chat_tools_parse_oaicompat(const T & tools);
|
||||||
template <class T> T common_chat_tools_to_json_oaicompat(const std::vector<common_chat_tool> & tools);
|
template <class T> T common_chat_tools_to_json_oaicompat(const std::vector<common_chat_tool> & tools);
|
||||||
|
|
||||||
|
template <class T> T common_chat_msg_diff_to_json_oaicompat(const common_chat_msg_diff & diff);
|
||||||
|
@ -1,24 +0,0 @@
|
|||||||
include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/build-info.cmake)
|
|
||||||
|
|
||||||
set(TEMPLATE_FILE "${CMAKE_CURRENT_SOURCE_DIR}/common/build-info.cpp.in")
|
|
||||||
set(OUTPUT_FILE "${CMAKE_CURRENT_SOURCE_DIR}/common/build-info.cpp")
|
|
||||||
|
|
||||||
# Only write the build info if it changed
|
|
||||||
if(EXISTS ${OUTPUT_FILE})
|
|
||||||
file(READ ${OUTPUT_FILE} CONTENTS)
|
|
||||||
string(REGEX MATCH "LLAMA_COMMIT = \"([^\"]*)\";" _ ${CONTENTS})
|
|
||||||
set(OLD_COMMIT ${CMAKE_MATCH_1})
|
|
||||||
string(REGEX MATCH "LLAMA_COMPILER = \"([^\"]*)\";" _ ${CONTENTS})
|
|
||||||
set(OLD_COMPILER ${CMAKE_MATCH_1})
|
|
||||||
string(REGEX MATCH "LLAMA_BUILD_TARGET = \"([^\"]*)\";" _ ${CONTENTS})
|
|
||||||
set(OLD_TARGET ${CMAKE_MATCH_1})
|
|
||||||
if (
|
|
||||||
NOT OLD_COMMIT STREQUAL BUILD_COMMIT OR
|
|
||||||
NOT OLD_COMPILER STREQUAL BUILD_COMPILER OR
|
|
||||||
NOT OLD_TARGET STREQUAL BUILD_TARGET
|
|
||||||
)
|
|
||||||
configure_file(${TEMPLATE_FILE} ${OUTPUT_FILE})
|
|
||||||
endif()
|
|
||||||
else()
|
|
||||||
configure_file(${TEMPLATE_FILE} ${OUTPUT_FILE})
|
|
||||||
endif()
|
|
@ -203,6 +203,7 @@ bool set_process_priority(enum ggml_sched_priority prio) {
 
     DWORD p = NORMAL_PRIORITY_CLASS;
     switch (prio) {
+        case GGML_SCHED_PRIO_LOW:    p = BELOW_NORMAL_PRIORITY_CLASS; break;
         case GGML_SCHED_PRIO_NORMAL: p = NORMAL_PRIORITY_CLASS; break;
         case GGML_SCHED_PRIO_MEDIUM: p = ABOVE_NORMAL_PRIORITY_CLASS; break;
         case GGML_SCHED_PRIO_HIGH:   p = HIGH_PRIORITY_CLASS; break;
@ -228,6 +229,7 @@ bool set_process_priority(enum ggml_sched_priority prio) {
 
     int p = 0;
     switch (prio) {
+        case GGML_SCHED_PRIO_LOW:    p =   5; break;
        case GGML_SCHED_PRIO_NORMAL: p =   0; break;
        case GGML_SCHED_PRIO_MEDIUM: p =  -5; break;
        case GGML_SCHED_PRIO_HIGH:   p = -10; break;
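For context, a minimal usage sketch of the helper the two hunks above extend — the new GGML_SCHED_PRIO_LOW level maps to BELOW_NORMAL_PRIORITY_CLASS on Windows and to a nice value of 5 elsewhere:

    // Sketch: request the new low-priority level before starting the compute threads.
    if (!set_process_priority(GGML_SCHED_PRIO_LOW)) {
        LOG_WRN("failed to lower the process priority\n");
    }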
@ -443,9 +445,28 @@ void string_replace_all(std::string & s, const std::string & search, const std::
     s = std::move(builder);
 }
+
+bool string_ends_with(const std::string_view & str, const std::string_view & suffix) {
+    return str.size() >= suffix.size() && str.compare(str.size()-suffix.size(), suffix.size(), suffix) == 0;
+}
+
+size_t string_find_partial_stop(const std::string_view & str, const std::string_view & stop) {
+    if (!str.empty() && !stop.empty()) {
+        const char text_last_char = str.back();
+        for (int64_t char_index = stop.size() - 1; char_index >= 0; char_index--) {
+            if (stop[char_index] == text_last_char) {
+                const auto current_partial = stop.substr(0, char_index + 1);
+                if (string_ends_with(str, current_partial)) {
+                    return str.size() - char_index - 1;
+                }
+            }
+        }
+    }
+
+    return std::string::npos;
+}
 
 std::string regex_escape(const std::string & s) {
     static const std::regex special_chars("[.^$|()*+?\\[\\]{}\\\\]");
-    return std::regex_replace(s, special_chars, "\\$0");
+    return std::regex_replace(s, special_chars, "\\$&");
 }
 
 std::string string_join(const std::vector<std::string> & values, const std::string & separator) {
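string_find_partial_stop reports where a possible prefix of a stop string begins at the end of the text, which lets a streaming caller hold back characters that might still turn into a stop sequence. A minimal sketch (the buffer contents and stop string are made up):

    std::string buffer = "Hello, wor";        // text streamed so far
    std::string_view stop = "world";
    size_t pos = string_find_partial_stop(buffer, stop);
    if (pos != std::string::npos) {
        // "wor" could still become "world": emit only buffer[0..pos), keep the tail buffered
        std::string safe_to_send = buffer.substr(0, pos);   // "Hello, "
        std::string held_back    = buffer.substr(pos);      // "wor"
    }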
@ -746,6 +767,9 @@ bool fs_validate_filename(const std::string & filename) {
     return true;
 }
 
+#include <iostream>
+
+
 // returns true if successful, false otherwise
 bool fs_create_directory_with_parents(const std::string & path) {
 #ifdef _WIN32
@ -763,9 +787,16 @@ bool fs_create_directory_with_parents(const std::string & path) {
     // process path from front to back, procedurally creating directories
     while ((pos_slash = path.find('\\', pos_slash)) != std::string::npos) {
         const std::wstring subpath = wpath.substr(0, pos_slash);
-        const wchar_t * test = subpath.c_str();
 
-        const bool success = CreateDirectoryW(test, NULL);
+        pos_slash += 1;
+
+        // skip the drive letter, in some systems it can return an access denied error
+        if (subpath.length() == 2 && subpath[1] == ':') {
+            continue;
+        }
+
+        const bool success = CreateDirectoryW(subpath.c_str(), NULL);
+
         if (!success) {
             const DWORD error = GetLastError();
 
@ -779,8 +810,6 @@ bool fs_create_directory_with_parents(const std::string & path) {
                 return false;
             }
         }
-
-        pos_slash += 1;
     }
 
     return true;
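To illustrate why the drive letter is skipped: the loop above visits every backslash-terminated prefix of the path, and on some systems CreateDirectoryW on the bare drive letter fails with an access-denied error even though the drive exists. A hypothetical walk-through (the path is an assumption):

    // fs_create_directory_with_parents("C:\\Users\\me\\.cache\\llama") visits the prefixes:
    //   "C:"                      -> skipped by the new length-2 / ':' check
    //   "C:\\Users"               -> CreateDirectoryW("C:\\Users", NULL)
    //   "C:\\Users\\me"           -> CreateDirectoryW("C:\\Users\\me", NULL)
    //   "C:\\Users\\me\\.cache"   -> CreateDirectoryW("C:\\Users\\me\\.cache", NULL)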
@ -830,7 +859,7 @@ std::string fs_get_cache_directory() {
     if (getenv("LLAMA_CACHE")) {
         cache_directory = std::getenv("LLAMA_CACHE");
     } else {
-#if defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)
+#if defined(__linux__) || defined(__FreeBSD__) || defined(_AIX) || defined(__OpenBSD__)
         if (std::getenv("XDG_CACHE_HOME")) {
             cache_directory = std::getenv("XDG_CACHE_HOME");
         } else {
@ -876,31 +905,6 @@ struct common_init_result common_init_from_params(common_params & params) {
 
     const llama_vocab * vocab = llama_model_get_vocab(model);
 
-    if (params.reranking) {
-        bool ok = true;
-
-        if (llama_vocab_bos(vocab) == LLAMA_TOKEN_NULL) {
-            LOG_WRN("%s: warning: vocab does not have a BOS token, reranking will not work\n", __func__);
-            ok = false;
-        }
-
-        if (llama_vocab_eos(vocab) == LLAMA_TOKEN_NULL) {
-            LOG_WRN("%s: warning: vocab does not have an EOS token, reranking will not work\n", __func__);
-            ok = false;
-        }
-
-        if (llama_vocab_sep(vocab) == LLAMA_TOKEN_NULL) {
-            LOG_WRN("%s: warning: vocab does not have a SEP token, reranking will not work\n", __func__);
-            ok = false;
-        }
-
-        if (!ok) {
-            llama_model_free(model);
-
-            return iparams;
-        }
-    }
-
     auto cparams = common_context_params_to_llama(params);
 
     llama_context * lctx = llama_init_from_model(model, cparams);
@ -910,7 +914,7 @@ struct common_init_result common_init_from_params(common_params & params) {
         return iparams;
     }
 
-    if (params.ctx_shift && !llama_kv_self_can_shift(lctx)) {
+    if (params.ctx_shift && !llama_memory_can_shift(llama_get_memory(lctx))) {
         LOG_WRN("%s: KV cache shifting is not supported for this context, disabling KV cache shifting\n", __func__);
         params.ctx_shift = false;
     }
@ -942,6 +946,35 @@ struct common_init_result common_init_from_params(common_params & params) {
         }
     }
 
+    if (llama_pooling_type(lctx) == LLAMA_POOLING_TYPE_RANK) {
+        bool ok = true;
+
+        if (llama_vocab_bos(vocab) == LLAMA_TOKEN_NULL) {
+            LOG_WRN("%s: warning: vocab does not have a BOS token, reranking will not work\n", __func__);
+            ok = false;
+        }
+
+        bool has_eos = llama_vocab_eos(vocab) != LLAMA_TOKEN_NULL;
+        bool has_sep = llama_vocab_sep(vocab) != LLAMA_TOKEN_NULL;
+
+        if (!has_eos && !has_sep) {
+            LOG_WRN("%s: warning: vocab does not have an EOS token or SEP token, reranking will not work\n", __func__);
+            ok = false;
+        } else if (!has_eos) {
+            LOG_WRN("%s: warning: vocab does not have an EOS token, using SEP token as fallback\n", __func__);
+        } else if (!has_sep) {
+            LOG_WRN("%s: warning: vocab does not have a SEP token, reranking will not work\n", __func__);
+            ok = false;
+        }
+
+        if (!ok) {
+            llama_free(lctx);
+            llama_model_free(model);
+
+            return iparams;
+        }
+    }
+
     // load and optionally apply lora adapters
     for (auto & la : params.lora_adapters) {
         llama_adapter_lora_ptr lora;
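Not part of the diff — a hypothetical helper that just restates the fallback the warnings above describe (prefer EOS, fall back to SEP when the vocab has no EOS token):

    static llama_token rerank_end_token(const llama_vocab * vocab) {
        const llama_token eos = llama_vocab_eos(vocab);
        // e.g. some BERT-style rerankers only define a SEP token
        return eos != LLAMA_TOKEN_NULL ? eos : llama_vocab_sep(vocab);
    }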
@ -1017,7 +1050,7 @@ struct common_init_result common_init_from_params(common_params & params) {
         if (llama_model_has_decoder(model)) {
             llama_decode(lctx, llama_batch_get_one(tmp.data(), std::min(tmp.size(), (size_t) params.n_batch)));
         }
-        llama_kv_self_clear(lctx);
+        llama_memory_clear(llama_get_memory(lctx), true);
         llama_synchronize(lctx);
         llama_perf_context_reset(lctx);
         llama_set_warmup(lctx, false);
@ -1083,6 +1116,9 @@ struct llama_model_params common_model_params_to_llama(common_params & params) {
         mparams.tensor_buft_overrides = params.tensor_buft_overrides.data();
     }
 
+    mparams.progress_callback           = params.load_progress_callback;
+    mparams.progress_callback_user_data = params.load_progress_callback_user_data;
+
     return mparams;
 }
 
@ -1096,7 +1132,6 @@ struct llama_context_params common_context_params_to_llama(const common_params &
     cparams.n_threads = params.cpuparams.n_threads;
     cparams.n_threads_batch = params.cpuparams_batch.n_threads == -1 ?
                               params.cpuparams.n_threads : params.cpuparams_batch.n_threads;
-    cparams.logits_all = params.logits_all;
     cparams.embeddings = params.embedding;
     cparams.rope_scaling_type = params.rope_scaling_type;
     cparams.rope_freq_base = params.rope_freq_base;
@ -1114,11 +1149,8 @@ struct llama_context_params common_context_params_to_llama(const common_params &
     cparams.offload_kqv = !params.no_kv_offload;
     cparams.flash_attn = params.flash_attn;
     cparams.no_perf = params.no_perf;
-
-    if (params.reranking) {
-        cparams.embeddings = true;
-        cparams.pooling_type = LLAMA_POOLING_TYPE_RANK;
-    }
+    cparams.op_offload = !params.no_op_offload;
+    cparams.swa_full = params.swa_full;
 
     cparams.type_k = params.cache_type_k;
     cparams.type_v = params.cache_type_v;
@ -1306,81 +1338,6 @@ std::string common_detokenize(const struct llama_vocab * vocab, const std::vecto
     return text;
 }
 
-//
-// KV cache utils
-//
-
-void common_kv_cache_dump_view(const llama_kv_cache_view & view, int row_size) {
-    static const char slot_chars[] = ".123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz+";
-
-    printf("=== Dumping KV cache. total cells %d, max sequences per cell %d, populated cells %d, total tokens in cache %d, largest empty slot=%d @ %d",
-        view.n_cells, view.n_seq_max, view.used_cells, view.token_count, view.max_contiguous, view.max_contiguous_idx);
-
-    llama_kv_cache_view_cell * c_curr = view.cells;
-    llama_seq_id * cs_curr = view.cells_sequences;
-
-    for (int i = 0; i < view.n_cells; i++, c_curr++, cs_curr += view.n_seq_max) {
-        if (i % row_size == 0) {
-            printf("\n%5d: ", i);
-        }
-        int seq_count = 0;
-        for (int j = 0; j < view.n_seq_max; j++) {
-            if (cs_curr[j] >= 0) { seq_count++; }
-        }
-        putchar(slot_chars[std::min(sizeof(slot_chars) - 2, size_t(seq_count))]);
-    }
-
-    printf("\n=== Done dumping\n");
-}
-
-void common_kv_cache_dump_view_seqs(const llama_kv_cache_view & view, int row_size) {
-    static const char slot_chars[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
-
-    printf("=== Dumping KV cache. total cells %d, max sequences per cell %d, populated cells %d, total tokens in cache %d, largest empty slot=%d @ %d\n",
-        view.n_cells, view.n_seq_max, view.used_cells, view.token_count, view.max_contiguous, view.max_contiguous_idx);
-
-    std::unordered_map<llama_seq_id, size_t> seqs;
-    llama_kv_cache_view_cell * c_curr = view.cells;
-    llama_seq_id * cs_curr = view.cells_sequences;
-
-    for (int i = 0; i < view.n_cells; i++, c_curr++, cs_curr += view.n_seq_max) {
-        for (int j = 0; j < view.n_seq_max; j++) {
-            if (cs_curr[j] < 0) { continue; }
-            if (seqs.find(cs_curr[j]) == seqs.end()) {
-                if (seqs.size() + 1 >= sizeof(slot_chars)) { break; }
-                const size_t sz = seqs.size();
-                seqs[cs_curr[j]] = sz;
-            }
-        }
-        if (seqs.size() + 1 >= sizeof(slot_chars)) { break; }
-    }
-
-    printf("=== Sequence legend: ");
-    for (const auto & it : seqs) {
-        printf("%zu=%d, ", it.second, it.first);
-    }
-    printf("'+'=other sequence ids");
-
-    c_curr = view.cells;
-    cs_curr = view.cells_sequences;
-    for (int i = 0; i < view.n_cells; i++, c_curr++, cs_curr += view.n_seq_max) {
-        if (i % row_size == 0) {
-            printf("\n%5d: ", i);
-        }
-        for (int j = 0; j < view.n_seq_max; j++) {
-            if (cs_curr[j] >= 0) {
-                const auto & it = seqs.find(cs_curr[j]);
-                putchar(it != seqs.end() ? int(slot_chars[it->second]) : '+');
-            } else {
-                putchar('.');
-            }
-        }
-        putchar(' ');
-    }
-
-    printf("\n=== Done dumping\n");
-}
-
 //
 // Embedding utils
 //
@ -1565,3 +1522,20 @@ common_control_vector_data common_control_vector_load(const std::vector<common_c
 
     return result;
 }
+
+ggml_opt_dataset_t common_opt_dataset_init(struct llama_context * ctx, const std::vector<llama_token> & tokens, int64_t stride) {
+    const int64_t ne_datapoint = llama_n_ctx(ctx);
+    const int64_t ndata        = (tokens.size() - ne_datapoint - 1) / stride;
+    ggml_opt_dataset_t result  = ggml_opt_dataset_init(
+        GGML_TYPE_I32, GGML_TYPE_I32, ne_datapoint, ne_datapoint, ndata, /*ndata_shard =*/ 1);
+
+    llama_token * data   = (llama_token *) ggml_opt_dataset_data(result)->data;
+    llama_token * labels = (llama_token *) ggml_opt_dataset_labels(result)->data;
+
+    for (int64_t idata = 0; idata < ndata; ++idata) {
+        memcpy(data   + idata*ne_datapoint, tokens.data() + idata*stride + 0, ne_datapoint*sizeof(llama_token));
+        memcpy(labels + idata*ne_datapoint, tokens.data() + idata*stride + 1, ne_datapoint*sizeof(llama_token));
+    }
+
+    return result;
+}
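A hedged usage sketch of common_opt_dataset_init (the text variable and loader are assumptions; each datapoint is a window of the token stream and the labels are the same window shifted by one token, as the memcpy calls above show):

    const std::string text = load_training_text();   // assumed helper
    std::vector<llama_token> tokens = common_tokenize(ctx, text, /* add_special */ true);

    // e.g. with n_ctx = 4 and stride = 2:
    //   data[0] = tokens[0..3], labels[0] = tokens[1..4]
    //   data[1] = tokens[2..5], labels[1] = tokens[3..6]
    ggml_opt_dataset_t dataset = common_opt_dataset_init(ctx, tokens, /* stride = */ llama_n_ctx(ctx) / 2);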
@ -6,6 +6,7 @@
 
 #include <set>
 #include <string>
+#include <string_view>
 #include <vector>
 #include <sstream>
 
@ -66,7 +67,6 @@ enum llama_example {
     LLAMA_EXAMPLE_COMMON,
     LLAMA_EXAMPLE_SPECULATIVE,
     LLAMA_EXAMPLE_MAIN,
-    LLAMA_EXAMPLE_INFILL,
     LLAMA_EXAMPLE_EMBEDDING,
     LLAMA_EXAMPLE_PERPLEXITY,
     LLAMA_EXAMPLE_RETRIEVAL,
@ -76,7 +76,7 @@ enum llama_example {
     LLAMA_EXAMPLE_SERVER,
     LLAMA_EXAMPLE_CVECTOR_GENERATOR,
     LLAMA_EXAMPLE_EXPORT_LORA,
-    LLAMA_EXAMPLE_LLAVA,
+    LLAMA_EXAMPLE_MTMD,
     LLAMA_EXAMPLE_LOOKUP,
     LLAMA_EXAMPLE_PARALLEL,
     LLAMA_EXAMPLE_TTS,
@ -96,6 +96,7 @@ enum common_sampler_type {
     COMMON_SAMPLER_TYPE_XTC = 8,
     COMMON_SAMPLER_TYPE_INFILL = 9,
     COMMON_SAMPLER_TYPE_PENALTIES = 10,
+    COMMON_SAMPLER_TYPE_TOP_N_SIGMA = 11,
 };
 
 // dimensionality reduction methods, used by cvector-generator
@ -114,7 +115,7 @@ enum common_grammar_trigger_type {
     COMMON_GRAMMAR_TRIGGER_TYPE_TOKEN,
     COMMON_GRAMMAR_TRIGGER_TYPE_WORD,
     COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN,
-    COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_START,
+    COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL,
 };
 
 struct common_grammar_trigger {
@ -161,6 +162,7 @@ struct common_params_sampling {
     std::vector<enum common_sampler_type> samplers = {
         COMMON_SAMPLER_TYPE_PENALTIES,
         COMMON_SAMPLER_TYPE_DRY,
+        COMMON_SAMPLER_TYPE_TOP_N_SIGMA,
         COMMON_SAMPLER_TYPE_TOP_K,
         COMMON_SAMPLER_TYPE_TYPICAL_P,
         COMMON_SAMPLER_TYPE_TOP_P,
@ -213,7 +215,8 @@ struct common_params_vocoder {
 
 enum common_reasoning_format {
     COMMON_REASONING_FORMAT_NONE,
-    COMMON_REASONING_FORMAT_DEEPSEEK, // Extract thinking tag contents and return as `message.reasoning_content`
+    COMMON_REASONING_FORMAT_DEEPSEEK_LEGACY, // Extract thinking tag contents and return as `message.reasoning_content`, or leave inline in <think> tags in stream mode
+    COMMON_REASONING_FORMAT_DEEPSEEK,        // Extract thinking tag contents and return as `message.reasoning_content`, including in streaming deltas.
 };
 
 struct common_params {
@ -289,6 +292,7 @@ struct common_params {
     int32_t verbosity = 0;
     int32_t control_vector_layer_start = -1; // layer range for control vector
     int32_t control_vector_layer_end = -1; // layer range for control vector
+    bool offline = false;
 
     int32_t ppl_stride = 0; // stride for perplexity calculations. If left at 0, the pre-existing approach will be used.
     int32_t ppl_output_type = 0; // = 0 -> ppl output is as usual, = 1 -> ppl output is num_tokens, ppl, one per line
@ -321,17 +325,17 @@ struct common_params {
     bool flash_attn = false; // flash attention
     bool no_perf = false; // disable performance metrics
     bool ctx_shift = true; // context shift on infinite text generation
+    bool swa_full = false; // use full-size SWA cache (https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055)
 
     bool input_prefix_bos = false; // prefix BOS to user inputs, preceding input_prefix
-    bool logits_all = false; // return logits for all tokens in the batch
     bool use_mmap = true; // use mmap for faster loads
     bool use_mlock = false; // use mlock to keep model in memory
     bool verbose_prompt = false; // print prompt tokens before generation
     bool display_prompt = true; // print prompt before generation
-    bool dump_kv_cache = false; // dump the KV cache contents for debugging purposes
     bool no_kv_offload = false; // disable KV offloading
     bool warmup = true; // warmup run
     bool check_tensors = false; // validate tensor data
+    bool no_op_offload = false; // globally disable offload host tensor operations to device
 
     bool single_turn = false; // single turn chat conversation
 
@ -340,8 +344,10 @@ struct common_params {
 
     common_conversation_mode conversation_mode = COMMON_CONVERSATION_MODE_AUTO;
 
-    // multimodal models (see examples/llava)
+    // multimodal models (see tools/mtmd)
     struct common_params_model mmproj;
+    bool mmproj_use_gpu = true; // use GPU for multimodal model
+    bool no_mmproj = false; // explicitly disable multimodal model
     std::vector<std::string> image; // path to image file(s)
 
     // embedding
@ -349,7 +355,6 @@ struct common_params {
     int32_t embd_normalize = 2; // normalisation for embeddings (-1=none, 0=max absolute int16, 1=taxicab, 2=euclidean, >2=p-norm)
     std::string embd_out = ""; // empty = default, "array" = [[],[]...], "json" = openai style, "json+" = same "json" + cosine similarity matrix
     std::string embd_sep = "\n"; // separator of embeddings
-    bool reranking = false; // enable reranking support on server
 
     // server params
     int32_t port = 8080; // server listens on this network port
@ -364,6 +369,8 @@ struct common_params {
     bool use_jinja = false; // NOLINT
     bool enable_chat_template = true;
     common_reasoning_format reasoning_format = COMMON_REASONING_FORMAT_DEEPSEEK;
+    int reasoning_budget = -1;
+    bool prefill_assistant = true; // if true, any trailing assistant message will be prefilled into the response
 
     std::vector<std::string> api_keys;
 
@ -407,13 +414,14 @@ struct common_params {
 
     bool process_output = false; // collect data for the output tensor
     bool compute_ppl = true; // whether to compute perplexity
+    bool parse_special = false; // whether to parse special tokens during imatrix tokenization
 
     // cvector-generator params
     int n_pca_batch = 100;
     int n_pca_iterations = 1000;
     dimre_method cvector_dimre_method = DIMRE_METHOD_PCA;
-    std::string cvector_positive_file = "examples/cvector-generator/positive.txt";
-    std::string cvector_negative_file = "examples/cvector-generator/negative.txt";
+    std::string cvector_positive_file = "tools/cvector-generator/positive.txt";
+    std::string cvector_negative_file = "tools/cvector-generator/negative.txt";
 
     bool spm_infill = false; // suffix/prefix/middle pattern for infill
 
@ -422,6 +430,11 @@ struct common_params {
 
     // common params
     std::string out_file; // output filename for all example programs
+
+    // optional callback for model loading progress and cancellation:
+    // called with a progress value between 0.0 and 1.0.
+    // return false from callback to abort model loading or true to continue
+    llama_progress_callback load_progress_callback = NULL;
+    void * load_progress_callback_user_data = NULL;
 };
 
 // call once at the start of a program if it uses libcommon
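A minimal sketch of wiring the new load-progress hook (the callback body is an assumption; per the comment above, returning false aborts the load):

    static bool on_load_progress(float progress, void * user_data) {
        const bool * abort_requested = static_cast<const bool *>(user_data);
        fprintf(stderr, "\rloading model: %3d%%", (int) (progress * 100));
        return !*abort_requested;   // false -> abort model loading
    }

    // ... in the caller:
    static bool abort_requested = false;
    common_params params;
    params.load_progress_callback           = on_load_progress;
    params.load_progress_callback_user_data = &abort_requested;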
@ -499,10 +512,9 @@ static bool string_starts_with(const std::string & str,
     return str.rfind(prefix, 0) == 0;
 }
 
-static bool string_ends_with(const std::string & str,
-                             const std::string & suffix) { // While we wait for C++20's std::string::ends_with...
-    return str.size() >= suffix.size() && str.compare(str.size()-suffix.size(), suffix.size(), suffix) == 0;
-}
+// While we wait for C++20's std::string::ends_with...
+bool string_ends_with(const std::string_view & str, const std::string_view & suffix);
+size_t string_find_partial_stop(const std::string_view & str, const std::string_view & stop);
 
 bool string_parse_kv_override(const char * data, std::vector<llama_model_kv_override> & overrides);
 void string_process_escapes(std::string & input);
@ -611,16 +623,6 @@ std::string common_detokenize(
     const std::vector<llama_token> & tokens,
     bool special = true);
 
-//
-// KV cache utils
-//
-
-// Dump the KV cache view with the number of sequences per cell.
-void common_kv_cache_dump_view(const llama_kv_cache_view & view, int row_size = 80);
-
-// Dump the KV cache view showing individual sequences in each cell (long output).
-void common_kv_cache_dump_view_seqs(const llama_kv_cache_view & view, int row_size = 40);
-
 //
 // Embedding utils
 //
@ -662,3 +664,9 @@ const char * const LLM_KV_SPLIT_COUNT = "split.count";
 const char * const LLM_KV_SPLIT_TENSORS_COUNT = "split.tensors.count";
 
 }
+
+//
+// training utils
+//
+
+ggml_opt_dataset_t common_opt_dataset_init(struct llama_context * ctx, const std::vector<llama_token> & tokens, int64_t stride);
common/json-partial.cpp (new file)
@ -0,0 +1,256 @@
|
#include "json-partial.h"
|
||||||
|
|
||||||
|
#include "log.h"
|
||||||
|
|
||||||
|
#include <nlohmann/json.hpp>
|
||||||
|
|
||||||
|
#include <string>
|
||||||
|
|
||||||
|
using json = nlohmann::ordered_json;
|
||||||
|
|
||||||
|
enum common_json_stack_element_type {
|
||||||
|
COMMON_JSON_STACK_ELEMENT_OBJECT,
|
||||||
|
COMMON_JSON_STACK_ELEMENT_KEY,
|
||||||
|
COMMON_JSON_STACK_ELEMENT_ARRAY,
|
||||||
|
};
|
||||||
|
|
||||||
|
struct common_json_stack_element {
|
||||||
|
common_json_stack_element_type type;
|
||||||
|
std::string key;
|
||||||
|
};
|
||||||
|
|
||||||
|
bool common_json_parse(
|
||||||
|
const std::string & input,
|
||||||
|
const std::string & healing_marker,
|
||||||
|
common_json & out)
|
||||||
|
{
|
||||||
|
std::string::const_iterator it = input.begin();
|
||||||
|
const auto end = input.end();
|
||||||
|
return common_json_parse(it, end, healing_marker, out);
|
||||||
|
}
|
||||||
|
|
||||||
|
bool common_json_parse(
|
||||||
|
std::string::const_iterator & it,
|
||||||
|
const std::string::const_iterator & end,
|
||||||
|
const std::string & healing_marker,
|
||||||
|
common_json & out)
|
||||||
|
{
|
||||||
|
// // https://json.nlohmann.me/features/parsing/sax_interface/
|
||||||
|
struct json_error_locator : public nlohmann::json_sax<json> {
|
||||||
|
std::size_t position;
|
||||||
|
bool found_error;
|
||||||
|
std::string last_token;
|
||||||
|
std::string exception_message;
|
||||||
|
std::vector<common_json_stack_element> stack;
|
||||||
|
|
||||||
|
json_error_locator() : position(0), found_error(false) {}
|
||||||
|
|
||||||
|
bool parse_error(std::size_t position, const std::string & last_token, const json::exception & ex) override { // NOLINT
|
||||||
|
this->position = position - 1;
|
||||||
|
this->found_error = true;
|
||||||
|
this->last_token = last_token;
|
||||||
|
this->exception_message = ex.what();
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
void close_value() {
|
||||||
|
if (!stack.empty() && (stack.back().type == COMMON_JSON_STACK_ELEMENT_KEY)) {
|
||||||
|
stack.pop_back();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
bool null() override { // NOLINT
|
||||||
|
close_value();
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
bool boolean(bool) override { // NOLINT
|
||||||
|
close_value();
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
bool number_integer(number_integer_t) override { // NOLINT
|
||||||
|
close_value();
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
bool number_unsigned(number_unsigned_t) override { // NOLINT
|
||||||
|
close_value();
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
bool number_float(number_float_t, const string_t &) override { // NOLINT
|
||||||
|
close_value();
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
bool string(string_t &) override { // NOLINT
|
||||||
|
close_value();
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
bool binary(binary_t &) override { // NOLINT
|
||||||
|
close_value();
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
bool start_object(std::size_t) override { // NOLINT
|
||||||
|
stack.push_back({COMMON_JSON_STACK_ELEMENT_OBJECT, ""});
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
bool end_object() override {
|
||||||
|
GGML_ASSERT(!stack.empty() && stack.back().type == COMMON_JSON_STACK_ELEMENT_OBJECT);
|
||||||
|
stack.pop_back();
|
||||||
|
close_value();
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
bool key(string_t & key) override { // NOLINT
|
||||||
|
stack.push_back({COMMON_JSON_STACK_ELEMENT_KEY, key});
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
bool start_array(std::size_t) override { // NOLINT
|
||||||
|
stack.push_back({COMMON_JSON_STACK_ELEMENT_ARRAY, ""});
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
bool end_array() override {
|
||||||
|
GGML_ASSERT(!stack.empty() && stack.back().type == COMMON_JSON_STACK_ELEMENT_ARRAY);
|
||||||
|
stack.pop_back();
|
||||||
|
close_value();
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
json_error_locator err_loc;
|
||||||
|
auto start = it;
|
||||||
|
json::sax_parse(it, end, &err_loc);
|
||||||
|
|
||||||
|
if (err_loc.found_error) {
|
||||||
|
it = start;
|
||||||
|
auto temptative_end = it + err_loc.position;
|
||||||
|
// LOG_DBG("Error at position %zu (is_end = %s): %s\n", err_loc.position, temptative_end == end ? "true" : "false", err_loc.exception_message.c_str());
|
||||||
|
|
||||||
|
auto input = std::string(it, temptative_end);
|
||||||
|
try {
|
||||||
|
out.json = json::parse(input);
|
||||||
|
// out.json = json::parse(it, temptative_end);
|
||||||
|
it = temptative_end;
|
||||||
|
return true;
|
||||||
|
} catch (const std::exception & ex) {
|
||||||
|
// No, needs healing.
|
||||||
|
LOG_DBG("Failed to parse up to error: %s: <<<%s>>>\n", ex.what(), std::string(it, temptative_end).c_str());
|
||||||
|
}
|
||||||
|
auto can_parse = [](const std::string & str) {
|
||||||
|
try {
|
||||||
|
auto _ = json::parse(str); // NOLINT
|
||||||
|
return true;
|
||||||
|
} catch (const std::exception &) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
if (!healing_marker.empty() && !err_loc.stack.empty()) {
|
||||||
|
std::string str(it, temptative_end);
|
||||||
|
auto last_non_sp_pos = str.find_last_not_of(" \n\r\t");
|
||||||
|
if (last_non_sp_pos == std::string::npos) {
|
||||||
|
throw std::runtime_error("Cannot heal a truncated JSON that stopped in an unknown location");
|
||||||
|
}
|
||||||
|
auto last_non_sp_char = str[last_non_sp_pos];
|
||||||
|
// Used to detect stops on a number, which may not be complete.
|
||||||
|
auto was_maybe_number = [&]() {
|
||||||
|
if (!str.empty() && std::isspace(str.back())) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
return std::isdigit(last_non_sp_char) ||
|
||||||
|
last_non_sp_char == '.' ||
|
||||||
|
last_non_sp_char == 'e' ||
|
||||||
|
last_non_sp_char == 'E' ||
|
||||||
|
last_non_sp_char == '-';
|
||||||
|
};
|
||||||
|
|
||||||
|
std::string closing;
|
||||||
|
for (size_t i = err_loc.stack.size(); i > 0; i--) {
|
||||||
|
auto & el = err_loc.stack[i - 1];
|
||||||
|
if (el.type == COMMON_JSON_STACK_ELEMENT_OBJECT) {
|
||||||
|
closing += "}";
|
||||||
|
} else if (el.type == COMMON_JSON_STACK_ELEMENT_ARRAY) {
|
||||||
|
closing += "]";
|
||||||
|
} else if (el.type != COMMON_JSON_STACK_ELEMENT_KEY) {
|
||||||
|
throw std::runtime_error("Unexpected stack element type");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const auto & magic_seed = out.healing_marker.marker = healing_marker;//"$llama.cpp.json$";
|
||||||
|
|
||||||
|
if (err_loc.stack.back().type == COMMON_JSON_STACK_ELEMENT_KEY) {
|
||||||
|
// We're inside an object value
|
||||||
|
if (last_non_sp_char == ':' && can_parse(str + "1" + closing)) {
|
||||||
|
// Was about to create an object value
|
||||||
|
str += (out.healing_marker.json_dump_marker = "\"" + magic_seed) + "\"" + closing;
|
||||||
|
} else if (can_parse(str + ": 1" + closing)) {
|
||||||
|
str += (out.healing_marker.json_dump_marker = ":\"" + magic_seed) + "\"" + closing;
|
||||||
|
} else if (last_non_sp_char == '{' && can_parse(str + closing)) {
|
||||||
|
// Was about to create an object
|
||||||
|
str += (out.healing_marker.json_dump_marker = "\"" + magic_seed) + "\": 1" + closing;
|
||||||
|
} else if (can_parse(str + "\"" + closing)) {
|
||||||
|
// Was inside an object value string
|
||||||
|
str += (out.healing_marker.json_dump_marker = magic_seed) + "\"" + closing;
|
||||||
|
} else if (str[str.length() - 1] == '\\' && can_parse(str + "\\\"" + closing)) {
|
||||||
|
// Was inside an object value string after an escape
|
||||||
|
str += (out.healing_marker.json_dump_marker = "\\" + magic_seed) + "\"" + closing;
|
||||||
|
} else {
|
||||||
|
// find last :
|
||||||
|
auto last_pos = str.find_last_of(':');
|
||||||
|
if (last_pos == std::string::npos) {
|
||||||
|
throw std::runtime_error("Cannot heal a truncated JSON that stopped in an unknown location");
|
||||||
|
}
|
||||||
|
// Cutting back to opening : for object value
|
||||||
|
str = str.substr(0, last_pos + 1) + (out.healing_marker.json_dump_marker = "\"" + magic_seed) + "\"" + closing;
|
||||||
|
}
|
||||||
|
} else if (err_loc.stack.back().type == COMMON_JSON_STACK_ELEMENT_ARRAY) {
|
||||||
|
if ((last_non_sp_char == ',' || last_non_sp_char == '[') && can_parse(str + "1" + closing)) {
|
||||||
|
// Was about to create an array value
|
||||||
|
str += (out.healing_marker.json_dump_marker = "\"" + magic_seed) + "\"" + closing;
|
||||||
|
} else if (can_parse(str + "\"" + closing)) {
|
||||||
|
// Was inside an array value string
|
||||||
|
str += (out.healing_marker.json_dump_marker = magic_seed) + "\"" + closing;
|
||||||
|
} else if (str[str.length() - 1] == '\\' && can_parse(str + "\\\"" + closing)) {
|
||||||
|
// Was inside an array value string after an escape
|
||||||
|
str += (out.healing_marker.json_dump_marker = "\\" + magic_seed) + "\"" + closing;
|
||||||
|
} else if (!was_maybe_number() && can_parse(str + ", 1" + closing)) {
|
||||||
|
// Had just finished a value
|
||||||
|
str += (out.healing_marker.json_dump_marker = ",\"" + magic_seed) + "\"" + closing;
|
||||||
|
} else {
|
||||||
|
auto last_pos = str.find_last_of("[,");
|
||||||
|
if (last_pos == std::string::npos) {
|
||||||
|
throw std::runtime_error("Cannot heal a truncated JSON array stopped in an unknown location");
|
||||||
|
}
|
||||||
|
// Cutting back to last [ or , for array value
|
||||||
|
str = str.substr(0, last_pos + 1) + (out.healing_marker.json_dump_marker = "\"" + magic_seed) + "\"" + closing;
|
||||||
|
}
|
||||||
|
} else if (err_loc.stack.back().type == COMMON_JSON_STACK_ELEMENT_OBJECT) {
|
||||||
|
if ((last_non_sp_char == '{' && can_parse(str + closing)) ||
|
||||||
|
(last_non_sp_char == ',' && can_parse(str + "\"\": 1" + closing))) {
|
||||||
|
// Was about to create an object key+value
|
||||||
|
str += (out.healing_marker.json_dump_marker = "\"" + magic_seed) + "\": 1" + closing;
|
||||||
|
} else if (!was_maybe_number() && can_parse(str + ",\"\": 1" + closing)) {
|
||||||
|
// Was about to create an object key+value
|
||||||
|
str += (out.healing_marker.json_dump_marker = ",\"" + magic_seed) + "\": 1" + closing;
|
||||||
|
} else if (can_parse(str + "\": 1" + closing)) {
|
||||||
|
// Was inside an object key string
|
||||||
|
str += (out.healing_marker.json_dump_marker = magic_seed) + "\": 1" + closing;
|
||||||
|
} else if (str[str.length() - 1] == '\\' && can_parse(str + "\\\": 1" + closing)) {
|
||||||
|
// Was inside an object key string after an escape
|
||||||
|
str += (out.healing_marker.json_dump_marker = "\\" + magic_seed) + "\": 1" + closing;
|
||||||
|
} else {
|
||||||
|
auto last_pos = str.find_last_of(':');
|
||||||
|
if (last_pos == std::string::npos) {
|
||||||
|
throw std::runtime_error("Cannot heal a truncated JSON object stopped in an unknown location");
|
||||||
|
}
|
||||||
|
// fprintf(stderr, "Cutting back to last : for object key+value\n");
|
||||||
|
str = str.substr(0, last_pos + 1) + (out.healing_marker.json_dump_marker = "\"" + magic_seed) + "\"" + closing;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
throw std::runtime_error("Cannot heal a truncated JSON object stopped in an unknown location");
|
||||||
|
}
|
||||||
|
// fprintf(stderr, "HEALED:\nSTRING <<<\n%s\n>>>\n\nmagic_cut: <<<\n%s\n>>>\n\n", str.c_str(), out.healing_marker.json_dump_marker.c_str());
|
||||||
|
out.json = json::parse(str);
|
||||||
|
it = temptative_end;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
// TODO: handle unclosed top-level primitive if the stack was empty but we got an error (e.g. "tru", "\"", etc...)
|
||||||
|
// fprintf(stderr, "Closing: TODO\n");
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
out.json = json::parse(it, end);
|
||||||
|
it = end;
|
||||||
|
return true;
|
||||||
|
}
|
common/json-partial.h (new file)
@ -0,0 +1,38 @@
+#pragma once
+
+#include <nlohmann/json.hpp>
+
+// Healing marker (empty if the JSON was fully parsed / wasn't healed).
+struct common_healing_marker {
+    // Raw marker.
+    std::string marker;
+
+    // Cutting the `common_json.json.dump()` string at the (only) occurrence of this marker should yield the original partial JSON string (modulo spaces / if it had the same dump format).
+    std::string json_dump_marker;
+};
+
+// Represents a parsed JSON object, with its optional healing marker (a JSON dump fragment that can be used to find the position of healing in the JSON dump string)
+struct common_json {
+    nlohmann::ordered_json json;
+
+    common_healing_marker healing_marker;
+};
+
+// Parse the JSON string, healing (closing) any partial JSON if `healing_marker` is not empty.
+//
+// Healing completes partial JSON strings by adding a (possibly modified) healing marker, then whatever is needed to close the JSON.
+// This allows to parse the resulting healed JSON string, yet be able to cut it again if needed at the healing marker.
+// (this is used when parsing JSON outputs from the models, then crafting partial JSONs for the partial tool calls in OAI format).
+//
+// For instance, parsing `{` with a healing marker `foo` will produce a healed JSON `{"foo":1}`, w/ json_dump_marker = `"foo"` (which can be used to break the JSON again).
+bool common_json_parse(
+    const std::string & input,
+    const std::string & healing_marker,
+    common_json & out);
+
+// Parse the JSON string (see overload above), but advancing an iterator to the end of the input when the (potentially partial) parsing succeeds.
+bool common_json_parse(
+    std::string::const_iterator & it,
+    const std::string::const_iterator & end,
+    const std::string & healing_marker,
+    common_json & out);
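A usage sketch based on the contract documented above (the truncated JSON and the marker string are made up):

    common_json out;
    std::string partial = "{\"location\": \"San Fra";              // truncated model output
    if (common_json_parse(partial, /* healing_marker = */ "$marker$", out)) {
        // out.json is now complete, e.g. {"location":"San Fra$marker$"}
        std::string dump = out.json.dump();
        // cutting at json_dump_marker recovers the original partial prefix (modulo spaces)
        std::string prefix = dump.substr(0, dump.find(out.healing_marker.json_dump_marker));
        // prefix == "{\"location\":\"San Fra"
    }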
@ -1,8 +1,9 @@
 #include "json-schema-to-grammar.h"
 #include "common.h"
 
+#include <nlohmann/json.hpp>
+
 #include <algorithm>
-#include <fstream>
 #include <map>
 #include <regex>
 #include <sstream>
@ -16,6 +17,9 @@ using json = nlohmann::ordered_json;
 static std::string build_repetition(const std::string & item_rule, int min_items, int max_items, const std::string & separator_rule = "") {
     auto has_max = max_items != std::numeric_limits<int>::max();
 
+    if (max_items == 0) {
+        return "";
+    }
     if (min_items == 0 && max_items == 1) {
         return item_rule + "?";
     }
@ -1,9 +1,9 @@
 #pragma once
 
-#include "ggml.h"
-// Change JSON_ASSERT from assert() to GGML_ASSERT:
-#define JSON_ASSERT GGML_ASSERT
-#include "json.hpp"
+#include <nlohmann/json_fwd.hpp>
+
+#include <functional>
+#include <string>
 
 std::string json_schema_to_grammar(const nlohmann::ordered_json & schema,
     bool force_gbnf = false);
@ -189,6 +189,7 @@ static LlgTokenizer * llama_sampler_llg_new_tokenizer(const llama_vocab * vocab)
     /* .tokenize_fn = */ llama_sampler_llg_tokenize_fn,
     /* .use_approximate_greedy_tokenize_fn = */ false,
     /* .tokenize_user_data = */ vocab,
+    /* .slices = */ nullptr,
 };
 
 char error_buffer[1024];
common/regex-partial.cpp (new file)
@ -0,0 +1,204 @@
|
#include "regex-partial.h"
|
||||||
|
#include "common.h"
|
||||||
|
#include <functional>
|
||||||
|
#include <optional>
|
||||||
|
|
||||||
|
common_regex::common_regex(const std::string & pattern) :
|
||||||
|
pattern(pattern),
|
||||||
|
rx(pattern),
|
||||||
|
rx_reversed_partial(regex_to_reversed_partial_regex(pattern)) {}
|
||||||
|
|
||||||
|
common_regex_match common_regex::search(const std::string & input, size_t pos, bool as_match) const {
|
||||||
|
std::smatch match;
|
||||||
|
if (pos > input.size()) {
|
||||||
|
throw std::runtime_error("Position out of bounds");
|
||||||
|
}
|
||||||
|
auto start = input.begin() + pos;
|
||||||
|
auto found = as_match
|
||||||
|
? std::regex_match(start, input.end(), match, rx)
|
||||||
|
: std::regex_search(start, input.end(), match, rx);
|
||||||
|
if (found) {
|
||||||
|
common_regex_match res;
|
||||||
|
res.type = COMMON_REGEX_MATCH_TYPE_FULL;
|
||||||
|
for (size_t i = 0; i < match.size(); ++i) {
|
||||||
|
auto begin = pos + match.position(i);
|
||||||
|
res.groups.emplace_back(begin, begin + match.length(i));
|
||||||
|
}
|
||||||
|
return res;
|
||||||
|
}
|
||||||
|
std::match_results<std::string::const_reverse_iterator> srmatch;
|
||||||
|
if (std::regex_match(input.rbegin(), input.rend() - pos, srmatch, rx_reversed_partial)) {
|
||||||
|
auto group = srmatch[1].str();
|
||||||
|
if (group.length() != 0) {
|
||||||
|
auto it = srmatch[1].second.base();
|
||||||
|
// auto position = static_cast<size_t>(std::distance(input.begin(), it));
|
||||||
|
if ((!as_match) || it == input.begin()) {
|
||||||
|
common_regex_match res;
|
||||||
|
res.type = COMMON_REGEX_MATCH_TYPE_PARTIAL;
|
||||||
|
const size_t begin = std::distance(input.begin(), it);
|
||||||
|
const size_t end = input.size();
|
||||||
|
if (begin == std::string::npos || end == std::string::npos || begin > end) {
|
||||||
|
throw std::runtime_error("Invalid range");
|
||||||
|
}
|
||||||
|
res.groups.push_back({begin, end});
|
||||||
|
return res;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return {};
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Transforms a regex pattern to a partial match pattern that operates on a reversed input string to find partial final matches of the original pattern.
|
||||||
|
|
||||||
|
Ideally we'd like to use boost::match_partial (https://beta.boost.org/doc/libs/1_59_0/libs/regex/doc/html/boost_regex/partial_matches.html)
|
||||||
|
to see if a string ends with a partial regex match, but it's not in std::regex yet.
|
||||||
|
Instead, we'll transform the regex into a partial match regex operating as a full match on the reverse iterators of the input.
|
||||||
|
|
||||||
|
- /abcd/ -> (dcba|cba|ba|a).* -> ((?:(?:(?:(?:d)?c)?b)?a).*
|
||||||
|
- /a|b/ -> (a|b).*
|
||||||
|
- /a*?/ -> error, could match ""
|
||||||
|
- /a*b/ -> ((?:b)?a*+).* (final repetitions become eager)
|
||||||
|
- /.*?ab/ -> ((?:b)?a).* (merge .*)
|
||||||
|
- /a.*?b/ -> ((?:b)?.*?a).* (keep reluctant matches)
|
||||||
|
- /a(bc)d/ -> ((?:(?:d)?(?:(?:c)?b))?a).*
|
||||||
|
- /a(bc|de)/ -> ((?:(?:(?:e)?d)?|(?:(?:c)?b)?)?a).*
|
||||||
|
- /ab{2,4}c/ -> abbb?b?c -> ((?:(?:(?:(?:(?:c)?b)?b)?b?)?b?)?a).*
|
||||||
|
|
||||||
|
The regex will match a reversed string fully, and the end of the first (And only) capturing group will indicate the reversed start of the original partial pattern
|
||||||
|
(i.e. just where the final .* starts in the inverted pattern; all other groups are turned into non-capturing groups, and reluctant quantifiers are ignored)
|
||||||
|
*/
|
||||||
|
std::string regex_to_reversed_partial_regex(const std::string & pattern) {
|
||||||
|
auto it = pattern.begin();
|
||||||
|
const auto end = pattern.end();
|
||||||
|
|
||||||
|
std::function<std::string()> process = [&]() {
|
||||||
|
std::vector<std::vector<std::string>> alternatives(1);
|
||||||
|
std::vector<std::string> * sequence = &alternatives.back();
|
||||||
|
|
||||||
|
while (it != end) {
|
||||||
|
if (*it == '[') {
|
||||||
|
auto start = it;
|
||||||
|
++it;
|
||||||
|
while (it != end) {
|
||||||
|
if ((*it == '\\') && (++it != end)) {
|
||||||
|
++it;
|
||||||
|
} else if ((it != end) && (*it == ']')) {
|
||||||
|
break;
|
||||||
|
} else {
|
||||||
|
++it;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (it == end) {
|
||||||
|
throw std::runtime_error("Unmatched '[' in pattern");
|
||||||
|
}
|
||||||
|
++it;
|
||||||
|
sequence->push_back(std::string(start, it));
|
||||||
|
} else if (*it == '*' || *it == '?' || *it == '+') {
|
||||||
|
if (sequence->empty()) {
|
||||||
|
throw std::runtime_error("Quantifier without preceding element");
|
||||||
|
}
|
||||||
|
sequence->back() += *it;
|
||||||
|
auto is_star = *it == '*';
|
||||||
|
++it;
|
||||||
|
if (is_star) {
|
||||||
|
if (*it == '?') {
|
||||||
|
++it;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else if (*it == '{') {
|
||||||
|
if (sequence->empty()) {
|
||||||
|
throw std::runtime_error("Repetition without preceding element");
|
||||||
|
}
|
||||||
|
++it;
|
||||||
|
auto start = it;
|
||||||
|
while (it != end && *it != '}') {
|
||||||
|
++it;
|
||||||
|
}
|
||||||
|
if (it == end) {
|
||||||
|
throw std::runtime_error("Unmatched '{' in pattern");
|
||||||
|
}
|
||||||
|
auto parts = string_split(std::string(start, it), ",");
|
||||||
|
++it;
|
||||||
|
if (parts.size() > 2) {
|
||||||
|
throw std::runtime_error("Invalid repetition range in pattern");
|
||||||
|
}
|
||||||
|
|
||||||
|
auto parseOptInt = [&](const std::string & s, const std::optional<int> & def = std::nullopt) -> std::optional<int> {
|
||||||
|
if (s.empty()) {
|
||||||
|
return def;
|
||||||
|
}
|
||||||
|
return std::stoi(s);
|
||||||
|
};
|
||||||
|
auto min = parseOptInt(parts[0], 0);
|
||||||
|
auto max = parts.size() == 1 ? min : parseOptInt(parts[1]);
|
||||||
|
if (min && max && *max < *min) {
|
||||||
|
throw std::runtime_error("Invalid repetition range in pattern");
|
||||||
|
}
|
||||||
|
// Brutal but... let's repeat at least min times, then ? for the delta between min & max (or * for unbounded)
|
||||||
|
auto part = sequence->back();
|
||||||
|
sequence->pop_back();
|
||||||
|
for (int i = 0; i < *min; i++) {
|
||||||
|
sequence->push_back(part);
|
||||||
|
}
|
||||||
|
if (max) {
|
||||||
|
for (int i = *min; i < *max; i++) {
|
||||||
|
sequence->push_back(part + "?");
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
sequence->push_back(part + "*");
|
||||||
|
}
|
||||||
|
} else if (*it == '(') {
|
||||||
|
++it;
|
||||||
|
if (it != end && *it == '?' && (it + 1 != end) && *(it + 1) == ':') {
|
||||||
|
it += 2;
|
||||||
|
}
|
||||||
|
auto sub = process();
|
||||||
|
if (*it != ')') {
|
||||||
|
throw std::runtime_error("Unmatched '(' in pattern");
|
||||||
|
}
|
||||||
|
++it;
|
||||||
|
auto & part = sequence->emplace_back("(?:");
|
||||||
|
part += sub;
|
||||||
|
part += ")";
|
||||||
|
} else if (*it == ')') {
|
||||||
|
break;
|
||||||
|
} else if (*it == '|') {
|
||||||
|
++it;
|
||||||
|
alternatives.emplace_back();
|
||||||
|
sequence = &alternatives.back();
|
||||||
|
} else if (*it == '\\' && (++it != end)) {
|
||||||
|
auto str = std::string("\\") + *it;
|
||||||
|
sequence->push_back(str);
|
||||||
|
++it;
|
||||||
|
} else if (it != end) {
|
||||||
|
sequence->push_back(std::string(1, *it));
|
||||||
|
++it;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// /abcd/ -> (dcba|cba|ba|a).* -> ((?:(?:(?:d)?c)?b)?a).*
|
||||||
|
// if n(=4) parts, opening n-1(=3) non-capturing groups after the 1 capturing group
|
||||||
|
// We'll do the outermost capturing group and final .* in the enclosing function.
|
||||||
|
std::vector<std::string> res_alts;
|
||||||
|
for (const auto & parts : alternatives) {
|
||||||
|
auto & res = res_alts.emplace_back();
|
||||||
|
for (size_t i = 0; i < parts.size() - 1; i++) {
|
||||||
|
res += "(?:";
|
||||||
|
}
|
||||||
|
for (auto it = parts.rbegin(); it != parts.rend(); ++it) {
|
||||||
|
res += *it;
|
||||||
|
if (it != parts.rend() - 1) {
|
||||||
|
res += ")?";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return string_join(res_alts, "|");
|
||||||
|
};
|
||||||
|
auto res = process();
|
||||||
|
if (it != end) {
|
||||||
|
throw std::runtime_error("Unmatched '(' in pattern");
|
||||||
|
}
|
||||||
|
|
||||||
|
return "(" + res + ")[\\s\\S]*";
|
||||||
|
}
|
common/regex-partial.h (new file)
@ -0,0 +1,56 @@
+#pragma once
+
+#include <regex>
+#include <string>
+
+enum common_regex_match_type {
+    COMMON_REGEX_MATCH_TYPE_NONE,
+    COMMON_REGEX_MATCH_TYPE_PARTIAL,
+    COMMON_REGEX_MATCH_TYPE_FULL,
+};
+
+struct common_string_range {
+    size_t begin;
+    size_t end;
+    common_string_range(size_t begin, size_t end) : begin(begin), end(end) {
+        if (begin > end) {
+            throw std::runtime_error("Invalid range");
+        }
+    }
+    // prevent default ctor
+    common_string_range() = delete;
+    bool empty() const {
+        return begin == end;
+    }
+    bool operator==(const common_string_range & other) const {
+        return begin == other.begin && end == other.end;
+    }
+};
+
+struct common_regex_match {
+    common_regex_match_type type = COMMON_REGEX_MATCH_TYPE_NONE;
+    std::vector<common_string_range> groups;
+
+    bool operator==(const common_regex_match & other) const {
+        return type == other.type && groups == other.groups;
+    }
+    bool operator!=(const common_regex_match & other) const {
+        return !(*this == other);
+    }
+};
+
+class common_regex {
+    std::string pattern;
+    std::regex rx;
+    std::regex rx_reversed_partial;
+
+  public:
+    explicit common_regex(const std::string & pattern);
+
+    common_regex_match search(const std::string & input, size_t pos, bool as_match = false) const;
+
+    const std::string & str() const { return pattern; }
+};
+
+// For testing only (pretty print of failures).
+std::string regex_to_reversed_partial_regex(const std::string & pattern);
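A usage sketch of common_regex for detecting that streamed text ends in the beginning of a trigger (the pattern and text are assumptions):

    common_regex re("<tool_call>");
    std::string text = "Let me check the weather. <tool_";

    common_regex_match m = re.search(text, /* pos = */ 0);
    if (m.type == COMMON_REGEX_MATCH_TYPE_PARTIAL) {
        // groups[0] spans the trailing "<tool_" that may still grow into a full match
        size_t keep_from = m.groups[0].begin;   // hold back text[keep_from..] for now
    }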
@ -1,6 +1,7 @@
|
|||||||
#include "sampling.h"
|
#include "sampling.h"
|
||||||
|
|
||||||
#include "common.h"
|
#include "common.h"
|
||||||
|
#include "log.h"
|
||||||
|
|
||||||
#include <cmath>
|
#include <cmath>
|
||||||
#include <unordered_map>
|
#include <unordered_map>
|
||||||
@ -160,7 +161,7 @@ struct common_sampler * common_sampler_init(const struct llama_model * model, co
|
|||||||
GGML_ABORT("llguidance (cmake -DLLAMA_LLGUIDANCE=ON) is not enabled");
|
GGML_ABORT("llguidance (cmake -DLLAMA_LLGUIDANCE=ON) is not enabled");
|
||||||
#endif // LLAMA_USE_LLGUIDANCE
|
#endif // LLAMA_USE_LLGUIDANCE
|
||||||
} else {
|
} else {
|
||||||
std::vector<std::string> patterns_at_start;
|
std::vector<std::string> trigger_patterns;
|
||||||
std::vector<std::string> patterns_anywhere;
|
std::vector<std::string> patterns_anywhere;
|
||||||
std::vector<llama_token> trigger_tokens;
|
std::vector<llama_token> trigger_tokens;
|
||||||
for (const auto & trigger : params.grammar_triggers) {
|
for (const auto & trigger : params.grammar_triggers) {
|
||||||
@ -172,10 +173,13 @@ struct common_sampler * common_sampler_init(const struct llama_model * model, co
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
case COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN:
|
case COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN:
|
||||||
case COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_START:
|
|
||||||
{
|
{
|
||||||
const auto & pattern = trigger.value;
|
patterns_anywhere.push_back(trigger.value);
|
||||||
(trigger.type == COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_START ? patterns_at_start : patterns_anywhere).push_back(pattern);
|
break;
|
||||||
|
}
|
||||||
|
case COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL:
|
||||||
|
{
|
||||||
|
trigger_patterns.push_back(trigger.value);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
case COMMON_GRAMMAR_TRIGGER_TYPE_TOKEN:
|
case COMMON_GRAMMAR_TRIGGER_TYPE_TOKEN:
|
||||||
@ -189,10 +193,6 @@ struct common_sampler * common_sampler_init(const struct llama_model * model, co
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
std::vector<std::string> trigger_patterns;
|
|
||||||
if (!patterns_at_start.empty()) {
|
|
||||||
trigger_patterns.push_back("^(" + string_join(patterns_at_start, "|") + ")[\\s\\S]*");
|
|
||||||
}
|
|
||||||
if (!patterns_anywhere.empty()) {
|
if (!patterns_anywhere.empty()) {
|
||||||
trigger_patterns.push_back("^[\\s\\S]*?(" + string_join(patterns_anywhere, "|") + ")[\\s\\S]*");
|
trigger_patterns.push_back("^[\\s\\S]*?(" + string_join(patterns_anywhere, "|") + ")[\\s\\S]*");
|
||||||
}
|
}
|
||||||
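In the hunk above, the `patterns_anywhere` list is folded into a single anchored regex whose lazy `[\s\S]*?` prefix makes group 1 report where the first trigger occurs in the accumulated output. A standalone sketch of that wrapper (the trigger literal is made up, not from the patch):

```cpp
#include <iostream>
#include <regex>
#include <string>

int main() {
    std::string trigger = "<tool_call>";  // hypothetical trigger pattern
    // Same shape as the "anywhere" wrapper built above: lazy prefix, capturing
    // group around the trigger, then consume the rest of the output.
    std::regex rx("^[\\s\\S]*?(" + trigger + ")[\\s\\S]*");

    std::string output = "Sure, let me call a tool: <tool_call>{...}";
    std::smatch m;
    if (std::regex_match(output, m, rx)) {
        // position(1) is the offset of the first trigger occurrence.
        std::cout << "trigger starts at offset " << m.position(1) << "\n";
    }
}
```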
@ -229,51 +229,48 @@ struct common_sampler * common_sampler_init(const struct llama_model * model, co
|
|||||||
params.logit_bias.data()));
|
params.logit_bias.data()));
|
||||||
|
|
||||||
if (params.mirostat == 0) {
|
if (params.mirostat == 0) {
|
||||||
if (params.top_n_sigma >= 0) {
|
for (const auto & cnstr : params.samplers) {
|
||||||
llama_sampler_chain_add(result->chain, llama_sampler_init_top_k (params.top_k));
|
switch (cnstr) {
|
||||||
llama_sampler_chain_add(result->chain, llama_sampler_init_temp (params.temp));
|
case COMMON_SAMPLER_TYPE_DRY:
|
||||||
llama_sampler_chain_add(result->chain, llama_sampler_init_top_n_sigma (params.top_n_sigma));
|
{
|
||||||
} else {
|
std::vector<const char *> c_breakers;
|
||||||
for (const auto & cnstr : params.samplers) {
|
c_breakers.reserve(params.dry_sequence_breakers.size());
|
||||||
switch (cnstr) {
|
for (const auto & str : params.dry_sequence_breakers) {
|
||||||
case COMMON_SAMPLER_TYPE_DRY:
|
c_breakers.push_back(str.c_str());
|
||||||
{
|
|
||||||
std::vector<const char *> c_breakers;
|
|
||||||
c_breakers.reserve(params.dry_sequence_breakers.size());
|
|
||||||
for (const auto & str : params.dry_sequence_breakers) {
|
|
||||||
c_breakers.push_back(str.c_str());
|
|
||||||
}
|
|
||||||
|
|
||||||
llama_sampler_chain_add(result->chain, llama_sampler_init_dry (vocab, llama_model_n_ctx_train(model), params.dry_multiplier, params.dry_base, params.dry_allowed_length, params.dry_penalty_last_n, c_breakers.data(), c_breakers.size()));
|
|
||||||
}
|
}
|
||||||
break;
|
|
||||||
case COMMON_SAMPLER_TYPE_TOP_K:
|
llama_sampler_chain_add(result->chain, llama_sampler_init_dry (vocab, llama_model_n_ctx_train(model), params.dry_multiplier, params.dry_base, params.dry_allowed_length, params.dry_penalty_last_n, c_breakers.data(), c_breakers.size()));
|
||||||
llama_sampler_chain_add(result->chain, llama_sampler_init_top_k (params.top_k));
|
}
|
||||||
break;
|
break;
|
||||||
case COMMON_SAMPLER_TYPE_TOP_P:
|
case COMMON_SAMPLER_TYPE_TOP_K:
|
||||||
llama_sampler_chain_add(result->chain, llama_sampler_init_top_p (params.top_p, params.min_keep));
|
llama_sampler_chain_add(result->chain, llama_sampler_init_top_k (params.top_k));
|
||||||
break;
|
break;
|
||||||
case COMMON_SAMPLER_TYPE_MIN_P:
|
case COMMON_SAMPLER_TYPE_TOP_P:
|
||||||
llama_sampler_chain_add(result->chain, llama_sampler_init_min_p (params.min_p, params.min_keep));
|
llama_sampler_chain_add(result->chain, llama_sampler_init_top_p (params.top_p, params.min_keep));
|
||||||
break;
|
break;
|
||||||
case COMMON_SAMPLER_TYPE_XTC:
|
case COMMON_SAMPLER_TYPE_TOP_N_SIGMA:
|
||||||
llama_sampler_chain_add(result->chain, llama_sampler_init_xtc (params.xtc_probability, params.xtc_threshold, params.min_keep, params.seed));
|
llama_sampler_chain_add(result->chain, llama_sampler_init_top_n_sigma (params.top_n_sigma));
|
||||||
break;
|
break;
|
||||||
case COMMON_SAMPLER_TYPE_TYPICAL_P:
|
case COMMON_SAMPLER_TYPE_MIN_P:
|
||||||
llama_sampler_chain_add(result->chain, llama_sampler_init_typical (params.typ_p, params.min_keep));
|
llama_sampler_chain_add(result->chain, llama_sampler_init_min_p (params.min_p, params.min_keep));
|
||||||
break;
|
break;
|
||||||
case COMMON_SAMPLER_TYPE_TEMPERATURE:
|
case COMMON_SAMPLER_TYPE_XTC:
|
||||||
llama_sampler_chain_add(result->chain, llama_sampler_init_temp_ext (params.temp, params.dynatemp_range, params.dynatemp_exponent));
|
llama_sampler_chain_add(result->chain, llama_sampler_init_xtc (params.xtc_probability, params.xtc_threshold, params.min_keep, params.seed));
|
||||||
break;
|
break;
|
||||||
case COMMON_SAMPLER_TYPE_INFILL:
|
case COMMON_SAMPLER_TYPE_TYPICAL_P:
|
||||||
llama_sampler_chain_add(result->chain, llama_sampler_init_infill (vocab));
|
llama_sampler_chain_add(result->chain, llama_sampler_init_typical (params.typ_p, params.min_keep));
|
||||||
break;
|
break;
|
||||||
case COMMON_SAMPLER_TYPE_PENALTIES:
|
case COMMON_SAMPLER_TYPE_TEMPERATURE:
|
||||||
llama_sampler_chain_add(result->chain, llama_sampler_init_penalties(params.penalty_last_n, params.penalty_repeat, params.penalty_freq, params.penalty_present));
|
llama_sampler_chain_add(result->chain, llama_sampler_init_temp_ext (params.temp, params.dynatemp_range, params.dynatemp_exponent));
|
||||||
break;
|
break;
|
||||||
default:
|
case COMMON_SAMPLER_TYPE_INFILL:
|
||||||
GGML_ASSERT(false && "unknown sampler type");
|
llama_sampler_chain_add(result->chain, llama_sampler_init_infill (vocab));
|
||||||
}
|
break;
|
||||||
|
case COMMON_SAMPLER_TYPE_PENALTIES:
|
||||||
|
llama_sampler_chain_add(result->chain, llama_sampler_init_penalties (params.penalty_last_n, params.penalty_repeat, params.penalty_freq, params.penalty_present));
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
GGML_ASSERT(false && "unknown sampler type");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
llama_sampler_chain_add(result->chain, llama_sampler_init_dist(params.seed));
|
llama_sampler_chain_add(result->chain, llama_sampler_init_dist(params.seed));
|
||||||
@ -475,6 +472,7 @@ char common_sampler_type_to_chr(enum common_sampler_type cnstr) {
|
|||||||
case COMMON_SAMPLER_TYPE_TOP_K: return 'k';
|
case COMMON_SAMPLER_TYPE_TOP_K: return 'k';
|
||||||
case COMMON_SAMPLER_TYPE_TYPICAL_P: return 'y';
|
case COMMON_SAMPLER_TYPE_TYPICAL_P: return 'y';
|
||||||
case COMMON_SAMPLER_TYPE_TOP_P: return 'p';
|
case COMMON_SAMPLER_TYPE_TOP_P: return 'p';
|
||||||
|
case COMMON_SAMPLER_TYPE_TOP_N_SIGMA: return 's';
|
||||||
case COMMON_SAMPLER_TYPE_MIN_P: return 'm';
|
case COMMON_SAMPLER_TYPE_MIN_P: return 'm';
|
||||||
case COMMON_SAMPLER_TYPE_TEMPERATURE: return 't';
|
case COMMON_SAMPLER_TYPE_TEMPERATURE: return 't';
|
||||||
case COMMON_SAMPLER_TYPE_XTC: return 'x';
|
case COMMON_SAMPLER_TYPE_XTC: return 'x';
|
||||||
@ -490,6 +488,7 @@ std::string common_sampler_type_to_str(enum common_sampler_type cnstr) {
|
|||||||
case COMMON_SAMPLER_TYPE_TOP_K: return "top_k";
|
case COMMON_SAMPLER_TYPE_TOP_K: return "top_k";
|
||||||
case COMMON_SAMPLER_TYPE_TYPICAL_P: return "typ_p";
|
case COMMON_SAMPLER_TYPE_TYPICAL_P: return "typ_p";
|
||||||
case COMMON_SAMPLER_TYPE_TOP_P: return "top_p";
|
case COMMON_SAMPLER_TYPE_TOP_P: return "top_p";
|
||||||
|
case COMMON_SAMPLER_TYPE_TOP_N_SIGMA: return "top_n_sigma";
|
||||||
case COMMON_SAMPLER_TYPE_MIN_P: return "min_p";
|
case COMMON_SAMPLER_TYPE_MIN_P: return "min_p";
|
||||||
case COMMON_SAMPLER_TYPE_TEMPERATURE: return "temperature";
|
case COMMON_SAMPLER_TYPE_TEMPERATURE: return "temperature";
|
||||||
case COMMON_SAMPLER_TYPE_XTC: return "xtc";
|
case COMMON_SAMPLER_TYPE_XTC: return "xtc";
|
||||||
@ -504,6 +503,7 @@ std::vector<common_sampler_type> common_sampler_types_from_names(const std::vect
|
|||||||
{ "dry", COMMON_SAMPLER_TYPE_DRY },
|
{ "dry", COMMON_SAMPLER_TYPE_DRY },
|
||||||
{ "top_k", COMMON_SAMPLER_TYPE_TOP_K },
|
{ "top_k", COMMON_SAMPLER_TYPE_TOP_K },
|
||||||
{ "top_p", COMMON_SAMPLER_TYPE_TOP_P },
|
{ "top_p", COMMON_SAMPLER_TYPE_TOP_P },
|
||||||
|
{ "top_n_sigma", COMMON_SAMPLER_TYPE_TOP_N_SIGMA },
|
||||||
{ "typ_p", COMMON_SAMPLER_TYPE_TYPICAL_P },
|
{ "typ_p", COMMON_SAMPLER_TYPE_TYPICAL_P },
|
||||||
{ "min_p", COMMON_SAMPLER_TYPE_MIN_P },
|
{ "min_p", COMMON_SAMPLER_TYPE_MIN_P },
|
||||||
{ "temperature", COMMON_SAMPLER_TYPE_TEMPERATURE },
|
{ "temperature", COMMON_SAMPLER_TYPE_TEMPERATURE },
|
||||||
@ -517,6 +517,7 @@ std::vector<common_sampler_type> common_sampler_types_from_names(const std::vect
|
|||||||
std::unordered_map<std::string, common_sampler_type> sampler_alt_name_map {
|
std::unordered_map<std::string, common_sampler_type> sampler_alt_name_map {
|
||||||
{ "top-k", COMMON_SAMPLER_TYPE_TOP_K },
|
{ "top-k", COMMON_SAMPLER_TYPE_TOP_K },
|
||||||
{ "top-p", COMMON_SAMPLER_TYPE_TOP_P },
|
{ "top-p", COMMON_SAMPLER_TYPE_TOP_P },
|
||||||
|
{ "top-n-sigma", COMMON_SAMPLER_TYPE_TOP_N_SIGMA },
|
||||||
{ "nucleus", COMMON_SAMPLER_TYPE_TOP_P },
|
{ "nucleus", COMMON_SAMPLER_TYPE_TOP_P },
|
||||||
{ "typical-p", COMMON_SAMPLER_TYPE_TYPICAL_P },
|
{ "typical-p", COMMON_SAMPLER_TYPE_TYPICAL_P },
|
||||||
{ "typical", COMMON_SAMPLER_TYPE_TYPICAL_P },
|
{ "typical", COMMON_SAMPLER_TYPE_TYPICAL_P },
|
||||||
@ -533,14 +534,16 @@ std::vector<common_sampler_type> common_sampler_types_from_names(const std::vect
|
|||||||
auto sampler = sampler_canonical_name_map.find(name);
|
auto sampler = sampler_canonical_name_map.find(name);
|
||||||
if (sampler != sampler_canonical_name_map.end()) {
|
if (sampler != sampler_canonical_name_map.end()) {
|
||||||
samplers.push_back(sampler->second);
|
samplers.push_back(sampler->second);
|
||||||
} else {
|
continue;
|
||||||
if (allow_alt_names) {
|
}
|
||||||
sampler = sampler_alt_name_map.find(name);
|
if (allow_alt_names) {
|
||||||
if (sampler != sampler_alt_name_map.end()) {
|
sampler = sampler_alt_name_map.find(name);
|
||||||
samplers.push_back(sampler->second);
|
if (sampler != sampler_alt_name_map.end()) {
|
||||||
}
|
samplers.push_back(sampler->second);
|
||||||
|
continue;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
LOG_WRN("%s: unable to match sampler by name '%s'\n", __func__, name.c_str());
|
||||||
}
|
}
|
||||||
|
|
||||||
return samplers;
|
return samplers;
|
||||||
@ -552,6 +555,7 @@ std::vector<common_sampler_type> common_sampler_types_from_chars(const std::stri
|
|||||||
{ common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TOP_K), COMMON_SAMPLER_TYPE_TOP_K },
|
{ common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TOP_K), COMMON_SAMPLER_TYPE_TOP_K },
|
||||||
{ common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TYPICAL_P), COMMON_SAMPLER_TYPE_TYPICAL_P },
|
{ common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TYPICAL_P), COMMON_SAMPLER_TYPE_TYPICAL_P },
|
||||||
{ common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TOP_P), COMMON_SAMPLER_TYPE_TOP_P },
|
{ common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TOP_P), COMMON_SAMPLER_TYPE_TOP_P },
|
||||||
|
{ common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TOP_N_SIGMA), COMMON_SAMPLER_TYPE_TOP_N_SIGMA },
|
||||||
{ common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_MIN_P), COMMON_SAMPLER_TYPE_MIN_P },
|
{ common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_MIN_P), COMMON_SAMPLER_TYPE_MIN_P },
|
||||||
{ common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TEMPERATURE), COMMON_SAMPLER_TYPE_TEMPERATURE },
|
{ common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TEMPERATURE), COMMON_SAMPLER_TYPE_TEMPERATURE },
|
||||||
{ common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_XTC), COMMON_SAMPLER_TYPE_XTC },
|
{ common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_XTC), COMMON_SAMPLER_TYPE_XTC },
|
||||||
@ -566,6 +570,8 @@ std::vector<common_sampler_type> common_sampler_types_from_chars(const std::stri
|
|||||||
const auto sampler = sampler_name_map.find(c);
|
const auto sampler = sampler_name_map.find(c);
|
||||||
if (sampler != sampler_name_map.end()) {
|
if (sampler != sampler_name_map.end()) {
|
||||||
samplers.push_back(sampler->second);
|
samplers.push_back(sampler->second);
|
||||||
|
} else {
|
||||||
|
LOG_WRN("%s: unable to match sampler by char '%c'\n", __func__, c);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
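The new name-map and char-map entries above mean both spellings of the sampler resolve to the same type. A small check, assuming common/sampling.h declares these helpers with the signatures shown in the hunk headers:

```cpp
#include "sampling.h"   // assumed: common/sampling.h from this repo

#include <cassert>

int main() {
    auto by_name = common_sampler_types_from_names({"top_n_sigma"}, /* allow_alt_names */ true);
    auto by_char = common_sampler_types_from_chars("s");

    assert(!by_name.empty() && by_name[0] == COMMON_SAMPLER_TYPE_TOP_N_SIGMA);
    assert(!by_char.empty() && by_char[0] == COMMON_SAMPLER_TYPE_TOP_N_SIGMA);
    return 0;
}
```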
@ -144,6 +144,8 @@ llama_tokens common_speculative_gen_draft(
|
|||||||
auto & smpl = spec->smpl;
|
auto & smpl = spec->smpl;
|
||||||
auto & prompt = spec->prompt;
|
auto & prompt = spec->prompt;
|
||||||
|
|
||||||
|
auto * mem = llama_get_memory(ctx);
|
||||||
|
|
||||||
int reuse_i = 0;
|
int reuse_i = 0;
|
||||||
int reuse_n = 0;
|
int reuse_n = 0;
|
||||||
|
|
||||||
@ -173,7 +175,7 @@ llama_tokens common_speculative_gen_draft(
|
|||||||
result.reserve(params.n_draft);
|
result.reserve(params.n_draft);
|
||||||
|
|
||||||
if (reuse_n == 0) {
|
if (reuse_n == 0) {
|
||||||
llama_kv_self_clear(ctx);
|
llama_memory_clear(mem, false);
|
||||||
|
|
||||||
prompt.clear();
|
prompt.clear();
|
||||||
} else {
|
} else {
|
||||||
@ -192,14 +194,14 @@ llama_tokens common_speculative_gen_draft(
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (reuse_i > 0) {
|
if (reuse_i > 0) {
|
||||||
llama_kv_self_seq_rm (ctx, 0, 0, reuse_i);
|
llama_memory_seq_rm (mem, 0, 0, reuse_i);
|
||||||
llama_kv_self_seq_add(ctx, 0, reuse_i, -1, -reuse_i);
|
llama_memory_seq_add(mem, 0, reuse_i, -1, -reuse_i);
|
||||||
|
|
||||||
prompt.erase(prompt.begin(), prompt.begin() + reuse_i);
|
prompt.erase(prompt.begin(), prompt.begin() + reuse_i);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (reuse_n < (int) prompt.size()) {
|
if (reuse_n < (int) prompt.size()) {
|
||||||
llama_kv_self_seq_rm (ctx, 0, reuse_n, -1);
|
llama_memory_seq_rm (mem, 0, reuse_n, -1);
|
||||||
|
|
||||||
prompt.erase(prompt.begin() + reuse_n, prompt.end());
|
prompt.erase(prompt.begin() + reuse_n, prompt.end());
|
||||||
}
|
}
|
||||||
|
File diff suppressed because it is too large
@ -1,28 +1,6 @@
|
|||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
# This script downloads the tokenizer models of the specified models from Huggingface and
|
|
||||||
# generates the get_vocab_base_pre() function for convert_hf_to_gguf.py
|
|
||||||
#
|
|
||||||
# This is necessary in order to analyze the type of pre-tokenizer used by the model and
|
|
||||||
# provide the necessary information to llama.cpp via the GGUF header in order to implement
|
|
||||||
# the same pre-tokenizer.
|
|
||||||
#
|
|
||||||
# ref: https://github.com/ggml-org/llama.cpp/pull/6920
|
|
||||||
#
|
|
||||||
# Instructions:
|
|
||||||
#
|
|
||||||
# - Add a new model to the "models" list
|
|
||||||
# - Run the script with your huggingface token:
|
|
||||||
#
|
|
||||||
# python3 convert_hf_to_gguf_update.py <huggingface_token>
|
|
||||||
#
|
|
||||||
# - The convert_hf_to_gguf.py script will have had its get_vocab_base_pre() function updated
|
|
||||||
# - Update llama.cpp with the new pre-tokenizer if necessary
|
|
||||||
#
|
|
||||||
# TODO: generate tokenizer tests for llama.cpp
|
|
||||||
#
|
|
||||||
|
|
||||||
import logging
|
import logging
|
||||||
import os
|
import os
|
||||||
import pathlib
|
import pathlib
|
||||||
@ -32,6 +10,7 @@ import requests
|
|||||||
import sys
|
import sys
|
||||||
import json
|
import json
|
||||||
import shutil
|
import shutil
|
||||||
|
import argparse
|
||||||
|
|
||||||
from hashlib import sha256
|
from hashlib import sha256
|
||||||
from enum import IntEnum, auto
|
from enum import IntEnum, auto
|
||||||
@ -41,6 +20,11 @@ logging.basicConfig(level=logging.DEBUG)
|
|||||||
logger = logging.getLogger("convert_hf_to_gguf_update")
|
logger = logging.getLogger("convert_hf_to_gguf_update")
|
||||||
sess = requests.Session()
|
sess = requests.Session()
|
||||||
|
|
||||||
|
convert_py_pth = pathlib.Path("convert_hf_to_gguf.py")
|
||||||
|
convert_py = convert_py_pth.read_text(encoding="utf-8")
|
||||||
|
hf_token_pth = pathlib.Path.home() / ".cache" / "huggingface" / "token"
|
||||||
|
hf_token = hf_token_pth.read_text(encoding="utf-8").strip() if hf_token_pth.exists() else None
|
||||||
|
|
||||||
|
|
||||||
class TOKENIZER_TYPE(IntEnum):
|
class TOKENIZER_TYPE(IntEnum):
|
||||||
SPM = auto()
|
SPM = auto()
|
||||||
@ -49,20 +33,49 @@ class TOKENIZER_TYPE(IntEnum):
|
|||||||
UGM = auto()
|
UGM = auto()
|
||||||

DOC_STRING = """
This script downloads the tokenizer models of the specified models from Huggingface and
generates the get_vocab_base_pre() function for convert_hf_to_gguf.py

/!\\ It is intended to be used by contributors and is not meant to be run by end users

This is necessary in order to analyze the type of pre-tokenizer used by the model and
provide the necessary information to llama.cpp via the GGUF header in order to implement
the same pre-tokenizer.

ref: https://github.com/ggml-org/llama.cpp/pull/6920

Instructions:

- Add a new model to the "models" list
- Run the script with your huggingface token
    By default, token will be read from ~/.cache/huggingface/token
- The convert_hf_to_gguf.py script will have had its get_vocab_base_pre() function updated
- Update llama.cpp with the new pre-tokenizer if necessary
"""
# TODO: generate tokenizer tests for llama.cpp

parser = argparse.ArgumentParser(description=DOC_STRING, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
    "--full", action="store_true",
    help="download full list of models - make sure you have access to all of them",
)
parser.add_argument(
    "hf_token",
    help="optional HF token",
    nargs="?",
)
args = parser.parse_args()
hf_token = args.hf_token if args.hf_token is not None else hf_token

if hf_token is None:
    logger.error("HF token is required. Please provide it as an argument or set it in ~/.cache/huggingface/token")
    sys.exit(1)

# TODO: this string has to exercise as much pre-tokenizer functionality as possible
|
# TODO: this string has to exercise as much pre-tokenizer functionality as possible
|
||||||
# will be updated with time - contributions welcome
|
# will be updated with time - contributions welcome
|
||||||
CHK_TXT = '\n \n\n \n\n\n \t \t\t \t\n \n \n \n \n🚀 (normal) 😶🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български \'\'\'\'\'\'```````\"\"\"\"......!!!!!!?????? I\'ve been \'told he\'s there, \'RE you sure? \'M not sure I\'ll make it, \'D you like some tea? We\'Ve a\'lL'
|
CHK_TXT = '\n \n\n \n\n\n \t \t\t \t\n \n \n \n \n🚀 (normal) 😶🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български \'\'\'\'\'\'```````\"\"\"\"......!!!!!!?????? I\'ve been \'told he\'s there, \'RE you sure? \'M not sure I\'ll make it, \'D you like some tea? We\'Ve a\'lL'
|
||||||
|
|
||||||
if len(sys.argv) == 2:
|
|
||||||
token = sys.argv[1]
|
|
||||||
if not token.startswith("hf_"):
|
|
||||||
logger.info("Huggingface token seems invalid")
|
|
||||||
logger.info("Usage: python convert_hf_to_gguf_update.py <huggingface_token>")
|
|
||||||
sys.exit(1)
|
|
||||||
else:
|
|
||||||
logger.info("Usage: python convert_hf_to_gguf_update.py <huggingface_token>")
|
|
||||||
sys.exit(1)
|
|
||||||
|
|
||||||
# TODO: add models here, base models preferred
|
# TODO: add models here, base models preferred
|
||||||
models = [
|
models = [
|
||||||
{"name": "llama-spm", "tokt": TOKENIZER_TYPE.SPM, "repo": "https://huggingface.co/meta-llama/Llama-2-7b-hf", },
|
{"name": "llama-spm", "tokt": TOKENIZER_TYPE.SPM, "repo": "https://huggingface.co/meta-llama/Llama-2-7b-hf", },
|
||||||
@ -103,7 +116,6 @@ models = [
|
|||||||
{"name": "exaone", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct", },
|
{"name": "exaone", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct", },
|
||||||
{"name": "phi-2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/microsoft/phi-2", },
|
{"name": "phi-2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/microsoft/phi-2", },
|
||||||
{"name": "chameleon", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/facebook/chameleon-7b", },
|
{"name": "chameleon", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/facebook/chameleon-7b", },
|
||||||
{"name": "minerva-7b", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/sapienzanlp/Minerva-7B-base-v1.0", },
|
|
||||||
{"name": "roberta-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/sentence-transformers/stsb-roberta-base"},
|
{"name": "roberta-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/sentence-transformers/stsb-roberta-base"},
|
||||||
{"name": "gigachat", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/ai-sage/GigaChat-20B-A3B-instruct"},
|
{"name": "gigachat", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/ai-sage/GigaChat-20B-A3B-instruct"},
|
||||||
{"name": "megrez", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/Infinigence/Megrez-3B-Instruct"},
|
{"name": "megrez", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/Infinigence/Megrez-3B-Instruct"},
|
||||||
@ -114,7 +126,17 @@ models = [
|
|||||||
{"name": "trillion", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/trillionlabs/Trillion-7B-preview", },
|
{"name": "trillion", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/trillionlabs/Trillion-7B-preview", },
|
||||||
{"name": "bailingmoe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/inclusionAI/Ling-lite", },
|
{"name": "bailingmoe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/inclusionAI/Ling-lite", },
|
||||||
{"name": "llama4", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/meta-llama/Llama-4-Scout-17B-16E-Instruct", },
|
{"name": "llama4", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/meta-llama/Llama-4-Scout-17B-16E-Instruct", },
|
||||||
{"name": "glm4", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/THUDM/glm-4-9b-hf", },
|
{"name": "pixtral", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/mistral-community/pixtral-12b", },
|
||||||
|
{"name": "seed-coder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/ByteDance-Seed/Seed-Coder-8B-Base", },
|
||||||
|
]
|
||||||
|
|
||||||
|
# some models are known to be broken upstream, so we will skip them as exceptions
|
||||||
|
pre_computed_hashes = [
|
||||||
|
# chatglm-bpe has 2 hashes, why?
|
||||||
|
{"name": "chatglm-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/THUDM/glm-4-9b-chat", "chkhsh": "b6e8e1518dc4305be2fe39c313ed643381c4da5db34a98f6a04c093f8afbe99b"},
|
||||||
|
{"name": "chatglm-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/THUDM/glm-4-9b-chat", "chkhsh": "81d72c7348a9f0ebe86f23298d37debe0a5e71149e29bd283904c02262b27516"},
|
||||||
|
{"name": "glm4", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/THUDM/glm-4-9b-hf", "chkhsh": "a1336059768a55c99a734006ffb02203cd450fed003e9a71886c88acf24fdbc2"},
|
||||||
|
{"name": "minerva-7b", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/sapienzanlp/Minerva-7B-base-v1.0", "chkhsh": "1431a23e583c97432bc230bff598d103ddb5a1f89960c8f1d1051aaa944d0b35"},
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
@ -167,9 +189,29 @@ def download_model(model):
|
|||||||
if os.path.isfile(save_path):
|
if os.path.isfile(save_path):
|
||||||
logger.info(f"{name}: File {save_path} already exists - skipping")
|
logger.info(f"{name}: File {save_path} already exists - skipping")
|
||||||
continue
|
continue
|
||||||
download_file_with_auth(f"{repo}/resolve/main/{file}", token, save_path)
|
download_file_with_auth(f"{repo}/resolve/main/{file}", hf_token, save_path)
|
||||||
|
|
||||||
|
|
||||||
|
# get list of existing models and chkhsh from the convert_hf_to_gguf.py file
|
||||||
|
# returns mapping res --> chkhsh
|
||||||
|
def get_existing_models(convert_py):
|
||||||
|
pattern = r'if chkhsh == "([a-f0-9]{64})":\s*\n\s*.*\s*res = "([^"]+)"'
|
||||||
|
matches = re.findall(pattern, convert_py)
|
||||||
|
output = {}
|
||||||
|
for chkhsh, res in matches:
|
||||||
|
output[res] = chkhsh
|
||||||
|
return output
|
||||||
|
|
||||||
|
|
||||||
|
existing_models = {}
|
||||||
|
all_models = models.copy()
|
||||||
|
if not args.full:
|
||||||
|
# Filter out models that already exist in convert_hf_to_gguf.py
|
||||||
|
existing_models = get_existing_models(convert_py)
|
||||||
|
all_models = models.copy()
|
||||||
|
models = [model for model in all_models if model["name"] not in existing_models]
|
||||||
|
|
||||||
|
logging.info(f"Downloading {len(models)} models...")
|
||||||
for model in models:
|
for model in models:
|
||||||
try:
|
try:
|
||||||
download_model(model)
|
download_model(model)
|
||||||
@ -180,9 +222,10 @@ for model in models:
|
|||||||
# generate the source code for the convert_hf_to_gguf.py:get_vocab_base_pre() function:
|
# generate the source code for the convert_hf_to_gguf.py:get_vocab_base_pre() function:
|
||||||
|
|
||||||
src_ifs = ""
|
src_ifs = ""
|
||||||
for model in models:
|
for model in [*all_models, *pre_computed_hashes]:
|
||||||
name = model["name"]
|
name = model["name"]
|
||||||
tokt = model["tokt"]
|
tokt = model["tokt"]
|
||||||
|
chkhsh = model.get("chkhsh")
|
||||||
|
|
||||||
if tokt == TOKENIZER_TYPE.SPM or tokt == TOKENIZER_TYPE.UGM:
|
if tokt == TOKENIZER_TYPE.SPM or tokt == TOKENIZER_TYPE.UGM:
|
||||||
continue
|
continue
|
||||||
@ -193,35 +236,44 @@ for model in models:
|
|||||||
continue
|
continue
|
||||||
|
|
||||||
# create the tokenizer
|
# create the tokenizer
|
||||||
try:
|
if chkhsh is not None:
|
||||||
if name == "t5":
|
# if the model has a pre-computed hash, use it
|
||||||
tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}", use_fast=False)
|
logger.info(f"Using pre-computed hash for model {name}: {chkhsh}")
|
||||||
else:
|
elif name in existing_models:
|
||||||
tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}")
|
# if the model already exists in convert_hf_to_gguf.py, skip compute hash
|
||||||
except OSError as e:
|
chkhsh = existing_models[name]
|
||||||
logger.error(f"Error loading tokenizer for model {name}. The model may not exist or is not accessible with the provided token. Error: {e}")
|
else:
|
||||||
continue # Skip to the next model if the tokenizer can't be loaded
|
# otherwise, compute the hash of the tokenizer
|
||||||
|
try:
|
||||||
|
logger.info(f"Loading tokenizer from {f'models/tokenizers/{name}'}...")
|
||||||
|
if name == "t5":
|
||||||
|
tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}", use_fast=False)
|
||||||
|
else:
|
||||||
|
tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}")
|
||||||
|
except OSError as e:
|
||||||
|
logger.error(f"Error loading tokenizer for model {name}. The model may not exist or is not accessible with the provided token. Error: {e}")
|
||||||
|
continue # Skip to the next model if the tokenizer can't be loaded
|
||||||
|
|
||||||
chktok = tokenizer.encode(CHK_TXT)
|
chktok = tokenizer.encode(CHK_TXT)
|
||||||
chkhsh = sha256(str(chktok).encode()).hexdigest()
|
chkhsh = sha256(str(chktok).encode()).hexdigest()
|
||||||
|
|
||||||
logger.info(f"model: {name}")
|
logger.info(f"model: {name}")
|
||||||
logger.info(f"tokt: {tokt}")
|
logger.info(f"tokt: {tokt}")
|
||||||
logger.info(f"repo: {model['repo']}")
|
logger.info(f"repo: {model['repo']}")
|
||||||
logger.info(f"chktok: {chktok}")
|
logger.info(f"chktok: {chktok}")
|
||||||
logger.info(f"chkhsh: {chkhsh}")
|
logger.info(f"chkhsh: {chkhsh}")
|
||||||
|
|
||||||
# print the "pre_tokenizer" content from the tokenizer.json
|
# print the "pre_tokenizer" content from the tokenizer.json
|
||||||
with open(f"models/tokenizers/{name}/tokenizer.json", "r", encoding="utf-8") as f:
|
with open(f"models/tokenizers/{name}/tokenizer.json", "r", encoding="utf-8") as f:
|
||||||
cfg = json.load(f)
|
cfg = json.load(f)
|
||||||
normalizer = cfg["normalizer"]
|
normalizer = cfg["normalizer"]
|
||||||
logger.info("normalizer: " + json.dumps(normalizer, indent=4))
|
logger.info("normalizer: " + json.dumps(normalizer, indent=4))
|
||||||
pre_tokenizer = cfg["pre_tokenizer"]
|
pre_tokenizer = cfg["pre_tokenizer"]
|
||||||
logger.info("pre_tokenizer: " + json.dumps(pre_tokenizer, indent=4))
|
logger.info("pre_tokenizer: " + json.dumps(pre_tokenizer, indent=4))
|
||||||
if "ignore_merges" in cfg["model"]:
|
if "ignore_merges" in cfg["model"]:
|
||||||
logger.info("ignore_merges: " + json.dumps(cfg["model"]["ignore_merges"], indent=4))
|
logger.info("ignore_merges: " + json.dumps(cfg["model"]["ignore_merges"], indent=4))
|
||||||
|
|
||||||
logger.info("")
|
logger.info("")
|
||||||
|
|
||||||
src_ifs += f" if chkhsh == \"{chkhsh}\":\n"
|
src_ifs += f" if chkhsh == \"{chkhsh}\":\n"
|
||||||
src_ifs += f" # ref: {model['repo']}\n"
|
src_ifs += f" # ref: {model['repo']}\n"
|
||||||
@ -269,8 +321,6 @@ src_func = f"""
|
|||||||
return res
|
return res
|
||||||
"""
|
"""
|
||||||
|
|
||||||
convert_py_pth = pathlib.Path("convert_hf_to_gguf.py")
|
|
||||||
convert_py = convert_py_pth.read_text(encoding="utf-8")
|
|
||||||
convert_py = re.sub(
|
convert_py = re.sub(
|
||||||
r"(# Marker: Start get_vocab_base_pre)(.+?)( +# Marker: End get_vocab_base_pre)",
|
r"(# Marker: Start get_vocab_base_pre)(.+?)( +# Marker: End get_vocab_base_pre)",
|
||||||
lambda m: m.group(1) + src_func + m.group(3),
|
lambda m: m.group(1) + src_func + m.group(3),
|
||||||
@ -286,7 +336,7 @@ logger.info("+++ convert_hf_to_gguf.py was updated")
|
|||||||
|
|
||||||
tests = [
|
tests = [
|
||||||
"ied 4 ½ months",
|
"ied 4 ½ months",
|
||||||
"Führer",
|
"Äpfel",
|
||||||
"",
|
"",
|
||||||
" ",
|
" ",
|
||||||
" ",
|
" ",
|
||||||
@ -365,6 +415,10 @@ for model in models:
|
|||||||
logger.error(f"Failed to load tokenizer for model {name}. Error: {e}")
|
logger.error(f"Failed to load tokenizer for model {name}. Error: {e}")
|
||||||
continue # Skip this model and continue with the next one in the loop
|
continue # Skip this model and continue with the next one in the loop
|
||||||
|
|
||||||
|
if not os.path.exists(f"models/ggml-vocab-{name}.gguf"):
|
||||||
|
logger.info(f"Skip vocab files for model {name}, no GGUF file found")
|
||||||
|
continue
|
||||||
|
|
||||||
with open(f"models/ggml-vocab-{name}.gguf.inp", "w", encoding="utf-8") as f:
|
with open(f"models/ggml-vocab-{name}.gguf.inp", "w", encoding="utf-8") as f:
|
||||||
for text in tests:
|
for text in tests:
|
||||||
f.write(f"{text}")
|
f.write(f"{text}")
|
||||||
|
@ -24,7 +24,7 @@ if 'NO_LOCAL_GGUF' not in os.environ:
|
|||||||
import gguf
|
import gguf
|
||||||
|
|
||||||
# reuse model definitions from convert_hf_to_gguf.py
|
# reuse model definitions from convert_hf_to_gguf.py
|
||||||
from convert_hf_to_gguf import LazyTorchTensor, Model
|
from convert_hf_to_gguf import LazyTorchTensor, ModelBase
|
||||||
|
|
||||||
logger = logging.getLogger("lora-to-gguf")
|
logger = logging.getLogger("lora-to-gguf")
|
||||||
|
|
||||||
@ -340,11 +340,11 @@ if __name__ == '__main__':
|
|||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
else:
|
else:
|
||||||
logger.info(f"Loading base model: {dir_base_model.name}")
|
logger.info(f"Loading base model: {dir_base_model.name}")
|
||||||
hparams = Model.load_hparams(dir_base_model)
|
hparams = ModelBase.load_hparams(dir_base_model)
|
||||||
|
|
||||||
with torch.inference_mode():
|
with torch.inference_mode():
|
||||||
try:
|
try:
|
||||||
model_class = Model.from_model_architecture(hparams["architectures"][0])
|
model_class = ModelBase.from_model_architecture(hparams["architectures"][0])
|
||||||
except NotImplementedError:
|
except NotImplementedError:
|
||||||
logger.error(f"Model {hparams['architectures'][0]} is not supported")
|
logger.error(f"Model {hparams['architectures'][0]} is not supported")
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
docs/backend/CANN.md | 155 (mode changed: Normal file → Executable file)
@ -8,6 +8,7 @@
|
|||||||
- [DataType Supports](#datatype-supports)
|
- [DataType Supports](#datatype-supports)
|
||||||
- [Docker](#docker)
|
- [Docker](#docker)
|
||||||
- [Linux](#linux)
|
- [Linux](#linux)
|
||||||
|
- [Environment variable setup](#environment-variable-setup)
|
||||||
- [TODO](#todo)
|
- [TODO](#todo)
|
||||||
|
|
||||||
|
|
||||||
@ -56,60 +57,82 @@ The llama.cpp CANN backend is designed to support Ascend NPU. It utilize the abi
|
|||||||
|
|
||||||
## Model Supports
|
## Model Supports
|
||||||
|
|
||||||
| Model Name | FP16 | Q8_0 | Q4_0 |
|
| Model Name | FP16 | Q4_0 | Q8_0 |
|
||||||
|:----------------------------|:-----:|:----:|:----:|
|
|:----------------------------|:-----:|:----:|:----:|
|
||||||
| AquilaChat2-7B | √ | √ | √ |
|
| Llama-2 | √ | √ | √ |
|
||||||
| Baichuan-7b | √ | √ | √ |
|
| Llama-3 | √ | √ | √ |
|
||||||
| Baichuan2-7B-Chat | √ | √ | √ |
|
| Mistral-7B | √ | √ | √ |
|
||||||
| bitnet_b1_58-large | √ | √ | √ |
|
| Mistral MOE | √ | √ | √ |
|
||||||
| bloom-560m | √ | x | √ |
|
| DBRX | - | - | - |
|
||||||
| bloomz-alpaca-560m | √ | x | √ |
|
| Falcon | √ | √ | √ |
|
||||||
| c4ai-command-r-35B-v01 | x | x | x |
|
| Chinese LLaMA/Alpaca | √ | √ | √ |
|
||||||
| chatglm3-6B | x | x | x |
|
| Vigogne(French) | √ | √ | √ |
|
||||||
| chinese-alpaca-2-1.3b | √ | √ | √ |
|
| BERT | x | x | x |
|
||||||
| CodeShell-7B | √ | √ | √ |
|
| Koala | √ | √ | √ |
|
||||||
| deepseek-ai_deepseek-coder-1.3B-base | x | x | x |
|
| Baichuan | √ | √ | √ |
|
||||||
| deepseek-ai_DeepSeek-V2-Lite | x | x | x |
|
| Aquila 1 & 2 | √ | √ | √ |
|
||||||
| deepseek-coder-6.7B-instruct | x | x | x |
|
| Starcoder models | √ | √ | √ |
|
||||||
| DeepSeek-V2-Lite-64x1.5B | x | x | x |
|
| Refact | √ | √ | √ |
|
||||||
| falcon-7b-instruct | √ | √ | √ |
|
| MPT | √ | √ | √ |
|
||||||
| flan-t5-large | √ | √ | √ |
|
| Bloom | √ | √ | √ |
|
||||||
| gemma-2-9b-it | √ | √ | √ |
|
| Yi models | √ | √ | √ |
|
||||||
| glm-4-9B | x | x | x |
|
| stablelm models | √ | √ | √ |
|
||||||
| gpt2 | √ | √ | √ |
|
| DeepSeek models | x | x | x |
|
||||||
| Gpt2-163M | √ | √ | √ |
|
| Qwen models | √ | √ | √ |
|
||||||
| granite-3B-code-instruct | √ | √ | √ |
|
| PLaMo-13B | √ | √ | √ |
|
||||||
|
| Phi models | √ | √ | √ |
|
||||||
|
| PhiMoE | √ | √ | √ |
|
||||||
|
| GPT-2 | √ | √ | √ |
|
||||||
|
| Orion | √ | √ | √ |
|
||||||
|
| InternlLM2 | √ | √ | √ |
|
||||||
|
| CodeShell | √ | √ | √ |
|
||||||
|
| Gemma | √ | √ | √ |
|
||||||
|
| Mamba | √ | √ | √ |
|
||||||
|
| Xverse | √ | √ | √ |
|
||||||
|
| command-r models | √ | √ | √ |
|
||||||
|
| Grok-1 | - | - | - |
|
||||||
|
| SEA-LION | √ | √ | √ |
|
||||||
| GritLM-7B | √ | √ | √ |
|
| GritLM-7B | √ | √ | √ |
|
||||||
| internlm2_5-7b-chat | √ | √ | √ |
|
| OLMo | √ | √ | √ |
|
||||||
| koala-7B-HF | √ | √ | √ |
|
| OLMo 2 | √ | √ | √ |
|
||||||
| Llama-2-7b-chat-hf | √ | √ | √ |
|
| OLMoE | √ | √ | √ |
|
||||||
| Llama-3-Smaug-8B | √ | √ | √ |
|
| Granite models | √ | √ | √ |
|
||||||
| Llama2-Chinese-7b-Chat | √ | √ | √ |
|
| GPT-NeoX | √ | √ | √ |
|
||||||
| Llama3-8B | √ | √ | √ |
|
| Pythia | √ | √ | √ |
|
||||||
| Llama3-8b-chinese | √ | √ | √ |
|
| Snowflake-Arctic MoE | - | - | - |
|
||||||
| mamba-130m-hf | √ | √ | √ |
|
| Smaug | √ | √ | √ |
|
||||||
| Mistral-7B-Instruct-v0.2 | √ | √ | √ |
|
| Poro 34B | √ | √ | √ |
|
||||||
| Mixtral-8x7B-Instruct-v0.1 | x | √ | √ |
|
| Bitnet b1.58 models | √ | x | x |
|
||||||
| mpt-7B | √ | √ | √ |
|
| Flan-T5 | √ | √ | √ |
|
||||||
| OLMo-1B-hf | √ | √ | √ |
|
| Open Elm models | x | √ | √ |
|
||||||
| OpenELM-3B-Instruct | √ | √ | √ |
|
| chatGLM3-6B + ChatGLM4-9b + GLMEdge-1.5b + GLMEdge-4b | √ | √ | √ |
|
||||||
| Orion-14b-base | √ | √ | √ |
|
| GLM-4-0414 | √ | √ | √ |
|
||||||
| phi1 | x | x | x |
|
| SmolLM | √ | √ | √ |
|
||||||
| phi2 | x | x | x |
|
| EXAONE-3.0-7.8B-Instruct | √ | √ | √ |
|
||||||
| Phi-3-mini-4k-instruct | √ | √ | √ |
|
| FalconMamba Models | √ | √ | √ |
|
||||||
| plamo-13b | √ | √ | √ |
|
| Jais Models | - | x | x |
|
||||||
| pythia-70M | x | x | x |
|
| Bielik-11B-v2.3 | √ | √ | √ |
|
||||||
| Qwen-7B | √ | √ | √ |
|
| RWKV-6 | - | √ | √ |
|
||||||
| Qwen2-1.5B-Instruct | √ | x | √ |
|
| QRWKV-6 | √ | √ | √ |
|
||||||
| Refact-1_6B-fim | √ | √ | √ |
|
| GigaChat-20B-A3B | x | x | x |
|
||||||
| SmolLM-135M | √ | √ | √ |
|
| Trillion-7B-preview | √ | √ | √ |
|
||||||
| stablelm-zephyr | x | x | x |
|
| Ling models | √ | √ | √ |
|
||||||
| stablelm-2-zephyr-1_6b | x | x | x |
|
|
||||||
| starcoderbase-1b | √ | √ | √ |
|
|
||||||
| starcoder2-3b | √ | √ | √ |
|
**Multimodal**
|
||||||
| vigogne-7b-chat | √ | √ | √ |
|
| Model Name | FP16 | Q4_0 | Q8_0 |
|
||||||
| xverse-7b-chat | √ | √ | √ |
|
|:----------------------------|:-----:|:----:|:----:|
|
||||||
| Yi-6b-Chat | √ | √ | √ |
|
| LLaVA 1.5 models, LLaVA 1.6 models | x | x | x |
|
||||||
|
| BakLLaVA | √ | √ | √ |
|
||||||
|
| Obsidian | √ | - | - |
|
||||||
|
| ShareGPT4V | x | - | - |
|
||||||
|
| MobileVLM 1.7B/3B models | - | - | - |
|
||||||
|
| Yi-VL | - | - | - |
|
||||||
|
| Mini CPM | √ | √ | √ |
|
||||||
|
| Moondream | √ | √ | √ |
|
||||||
|
| Bunny | √ | - | - |
|
||||||
|
| GLM-EDGE | √ | √ | √ |
|
||||||
|
| Qwen2-VL | √ | √ | √ |
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@ -258,6 +281,34 @@ cmake --build build --config release
|
|||||||
### **GitHub contribution**:
|
### **GitHub contribution**:
|
||||||
Please add the **[CANN]** prefix/tag in issues/PRs titles to help the CANN-team check/address them without delay.
|
Please add the **[CANN]** prefix/tag in issues/PRs titles to help the CANN-team check/address them without delay.
|
||||||
|

## Updates

### Basic Flash Attention Support

A basic FA kernel using aclnn ops has been added in aclnn_ops.cpp.
Currently, FA only supports FP16 KV tensors and no logit softcap.
Since the aclnn flash-attention interface cannot support logit softcap, only the quantized version will be updated in the future.

Authors from Peking University: Bizhao Shi (bshi@pku.edu.cn), Yuxin Yang (yxyang@pku.edu.cn), Ruiyang Ma (ruiyang@stu.pku.edu.cn), and Guojie Luo (gluo@pku.edu.cn).

We would like to thank Tuo Dai, Shanni Li, and all of the project maintainers from Huawei Technologies Co., Ltd for their help during the code development and pull request.

## Environment variable setup

### GGML_CANN_ASYNC_MODE

Enables asynchronous operator submission. Disabled by default.

### GGML_CANN_MEM_POOL

Specifies the memory pool management strategy:

- vmm: Uses a virtual memory manager pool. If hardware support for VMM is unavailable, it falls back to the legacy (leg) memory pool.
- prio: Uses priority-queue-based memory pool management.
- leg: Uses a fixed-size buffer pool.

### GGML_CANN_DISABLE_BUF_POOL_CLEAN

Controls automatic cleanup of the memory pool. This option only takes effect with the prio or leg memory pool strategies.

|
|
||||||
## TODO
|
## TODO
|
||||||
- Support more models and data types.
|
- Support more models and data types.
|
||||||
|
@ -17,25 +17,25 @@
|
|||||||
|
|
||||||
**SYCL** is a high-level parallel programming model designed to improve developers productivity writing code across various hardware accelerators such as CPUs, GPUs, and FPGAs. It is a single-source language designed for heterogeneous computing and based on standard C++17.
|
**SYCL** is a high-level parallel programming model designed to improve developers productivity writing code across various hardware accelerators such as CPUs, GPUs, and FPGAs. It is a single-source language designed for heterogeneous computing and based on standard C++17.
|
||||||
|
|
||||||
**oneAPI** is an open ecosystem and a standard-based specification, supporting multiple architectures including but not limited to intel CPUs, GPUs and FPGAs. The key components of the oneAPI ecosystem include:
|
**oneAPI** is an open ecosystem and a standard-based specification, supporting multiple architectures including but not limited to Intel CPUs, GPUs and FPGAs. The key components of the oneAPI ecosystem include:
|
||||||
|
|
||||||
- **DPCPP** *(Data Parallel C++)*: The primary oneAPI SYCL implementation, which includes the icpx/icx Compilers.
|
- **DPCPP** *(Data Parallel C++)*: The primary oneAPI SYCL implementation, which includes the icpx/icx Compilers.
|
||||||
- **oneAPI Libraries**: A set of highly optimized libraries targeting multiple domains *(e.g. Intel oneMKL, oneMath and oneDNN)*.
|
- **oneAPI Libraries**: A set of highly optimized libraries targeting multiple domains *(e.g. Intel oneMKL, oneMath and oneDNN)*.
|
||||||
- **oneAPI LevelZero**: A high performance low level interface for fine-grained control over intel iGPUs and dGPUs.
|
- **oneAPI LevelZero**: A high performance low level interface for fine-grained control over Intel iGPUs and dGPUs.
|
||||||
- **Nvidia & AMD Plugins**: These are plugins extending oneAPI's DPCPP support to SYCL on Nvidia and AMD GPU targets.
|
- **Nvidia & AMD Plugins**: These are plugins extending oneAPI's DPCPP support to SYCL on Nvidia and AMD GPU targets.
|
||||||
|
|
||||||
### Llama.cpp + SYCL
|
### Llama.cpp + SYCL
|
||||||
|
|
||||||
The llama.cpp SYCL backend is designed to support **Intel GPU** firstly. Based on the cross-platform feature of SYCL, it also supports other vendor GPUs: Nvidia and AMD.
|
The llama.cpp SYCL backend is primarily designed for **Intel GPUs**.
|
||||||
|
SYCL cross-platform capabilities enable support for Nvidia GPUs as well, with limited support for AMD.
|
||||||
|
|
||||||
## Recommended Release
|
## Recommended Release
|
||||||
|
|
||||||
The SYCL backend would be broken by some PRs due to no online CI.
|
The following releases are verified and recommended:
|
||||||
|
|
||||||
The following release is verified with good quality:
|
|
||||||
|
|
||||||
|Commit ID|Tag|Release|Verified Platform| Update date|
|
|Commit ID|Tag|Release|Verified Platform| Update date|
|
||||||
|-|-|-|-|-|
|
|-|-|-|-|-|
|
||||||
|
|24e86cae7219b0f3ede1d5abdf5bf3ad515cccb8|b5377 |[llama-b5377-bin-win-sycl-x64.zip](https://github.com/ggml-org/llama.cpp/releases/download/b5377/llama-b5377-bin-win-sycl-x64.zip) |ArcB580/Linux/oneAPI 2025.1<br>LNL Arc GPU/Windows 11/oneAPI 2025.1.1|2025-05-15|
|
||||||
|3bcd40b3c593d14261fb2abfabad3c0fb5b9e318|b4040 |[llama-b4040-bin-win-sycl-x64.zip](https://github.com/ggml-org/llama.cpp/releases/download/b4040/llama-b4040-bin-win-sycl-x64.zip) |Arc770/Linux/oneAPI 2024.1<br>MTL Arc GPU/Windows 11/oneAPI 2024.1| 2024-11-19|
|
|3bcd40b3c593d14261fb2abfabad3c0fb5b9e318|b4040 |[llama-b4040-bin-win-sycl-x64.zip](https://github.com/ggml-org/llama.cpp/releases/download/b4040/llama-b4040-bin-win-sycl-x64.zip) |Arc770/Linux/oneAPI 2024.1<br>MTL Arc GPU/Windows 11/oneAPI 2024.1| 2024-11-19|
|
||||||
|fb76ec31a9914b7761c1727303ab30380fd4f05c|b3038 |[llama-b3038-bin-win-sycl-x64.zip](https://github.com/ggml-org/llama.cpp/releases/download/b3038/llama-b3038-bin-win-sycl-x64.zip) |Arc770/Linux/oneAPI 2024.1<br>MTL Arc GPU/Windows 11/oneAPI 2024.1||
|
|fb76ec31a9914b7761c1727303ab30380fd4f05c|b3038 |[llama-b3038-bin-win-sycl-x64.zip](https://github.com/ggml-org/llama.cpp/releases/download/b3038/llama-b3038-bin-win-sycl-x64.zip) |Arc770/Linux/oneAPI 2024.1<br>MTL Arc GPU/Windows 11/oneAPI 2024.1||
|
||||||
|
|
||||||
@ -106,15 +106,14 @@ SYCL backend supports Intel GPU Family:
|
|||||||
|-------------------------------|---------|---------------------------------------|
|
|-------------------------------|---------|---------------------------------------|
|
||||||
| Intel Data Center Max Series | Support | Max 1550, 1100 |
|
| Intel Data Center Max Series | Support | Max 1550, 1100 |
|
||||||
| Intel Data Center Flex Series | Support | Flex 170 |
|
| Intel Data Center Flex Series | Support | Flex 170 |
|
||||||
| Intel Arc Series | Support | Arc 770, 730M, Arc A750 |
|
| Intel Arc Series | Support | Arc 770, 730M, Arc A750, B580 |
|
||||||
| Intel built-in Arc GPU | Support | built-in Arc GPU in Meteor Lake, Arrow Lake |
|
| Intel built-in Arc GPU | Support | built-in Arc GPU in Meteor Lake, Arrow Lake, Lunar Lake |
|
||||||
| Intel iGPU | Support | iGPU in 13700k,iGPU in 13400, i5-1250P, i7-1260P, i7-1165G7 |
|
| Intel iGPU | Support | iGPU in 13700k, 13400, i5-1250P, i7-1260P, i7-1165G7 |
|
||||||
|
|
||||||
*Notes:*
|
*Notes:*
|
||||||
|
|
||||||
- **Memory**
|
- **Memory**
|
||||||
- The device memory is a limitation when running a large model. The loaded model size, *`llm_load_tensors: buffer_size`*, is displayed in the log when running `./bin/llama-cli`.
|
- The device memory is a limitation when running a large model. The loaded model size, *`llm_load_tensors: buffer_size`*, is displayed in the log when running `./bin/llama-cli`.
|
||||||
|
|
||||||
- Please make sure the GPU shared memory from the host is large enough to account for the model's size. For e.g. the *llama-2-7b.Q4_0* requires at least 8.0GB for integrated GPU and 4.0GB for discrete GPU.
|
- Please make sure the GPU shared memory from the host is large enough to account for the model's size. For e.g. the *llama-2-7b.Q4_0* requires at least 8.0GB for integrated GPU and 4.0GB for discrete GPU.
|
||||||
|
|
||||||
- **Execution Unit (EU)**
|
- **Execution Unit (EU)**
|
||||||
@ -138,9 +137,11 @@ Note: AMD GPU support is highly experimental and is incompatible with F16.
|
|||||||
Additionally, it only supports GPUs with a sub_group_size (warp size) of 32.
|
Additionally, it only supports GPUs with a sub_group_size (warp size) of 32.
|
||||||
|
|
||||||
## Docker
|
## Docker
|
||||||
The docker build option is currently limited to *intel GPU* targets.
|
|
||||||
|
The docker build option is currently limited to *Intel GPU* targets.
|
||||||
|
|
||||||
### Build image
|
### Build image
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
# Using FP16
|
# Using FP16
|
||||||
docker build -t llama-cpp-sycl --build-arg="GGML_SYCL_F16=ON" --target light -f .devops/intel.Dockerfile .
|
docker build -t llama-cpp-sycl --build-arg="GGML_SYCL_F16=ON" --target light -f .devops/intel.Dockerfile .
|
||||||
@ -148,9 +149,10 @@ docker build -t llama-cpp-sycl --build-arg="GGML_SYCL_F16=ON" --target light -f
|
|||||||
|
|
||||||
*Notes*:
|
*Notes*:
|
||||||
|
|
||||||
To build in default FP32 *(Slower than FP16 alternative)*, you can remove the `--build-arg="GGML_SYCL_F16=ON"` argument from the previous command.
|
To build in default FP32 *(Slower than FP16 alternative)*, set `--build-arg="GGML_SYCL_F16=OFF"` in the previous command.
|
||||||
|
|
||||||
You can also use the `.devops/llama-server-intel.Dockerfile`, which builds the *"server"* alternative.
|
You can also use the `.devops/llama-server-intel.Dockerfile`, which builds the *"server"* alternative.
|
||||||
|
Check the [documentation for Docker](../docker.md) to see the available images.
|
||||||
|
|
||||||
### Run container
|
### Run container
|
||||||
|
|
||||||
@ -250,7 +252,7 @@ sycl-ls
|
|||||||
|
|
||||||
- **Intel GPU**
|
- **Intel GPU**
|
||||||
|
|
||||||
When targeting an intel GPU, the user should expect one or more level-zero devices among the available SYCL devices. Please make sure that at least one GPU is present, for instance [`level_zero:gpu`] in the sample output below:
|
When targeting an intel GPU, the user should expect one or more devices among the available SYCL devices. Please make sure that at least one GPU is present via `sycl-ls`, for instance `[level_zero:gpu]` in the sample output below:
|
||||||
|
|
||||||
```
|
```
|
||||||
[opencl:acc][opencl:0] Intel(R) FPGA Emulation Platform for OpenCL(TM), Intel(R) FPGA Emulation Device OpenCL 1.2 [2023.16.10.0.17_160000]
|
[opencl:acc][opencl:0] Intel(R) FPGA Emulation Platform for OpenCL(TM), Intel(R) FPGA Emulation Device OpenCL 1.2 [2023.16.10.0.17_160000]
|
||||||
@ -282,7 +284,7 @@ For AMD GPUs we should expect at least one SYCL-HIP device [`hip:gpu`]:
|
|||||||
|
|
||||||
#### Intel GPU
|
#### Intel GPU
|
||||||
|
|
||||||
```
|
```sh
|
||||||
./examples/sycl/build.sh
|
./examples/sycl/build.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -351,7 +353,7 @@ cmake --build build --config Release -j -v
|
|||||||
|
|
||||||
#### Retrieve and prepare model
|
#### Retrieve and prepare model
|
||||||
|
|
||||||
You can refer to the general [*Prepare and Quantize*](README.md#prepare-and-quantize) guide for model prepration, or simply download [llama-2-7b.Q4_0.gguf](https://huggingface.co/TheBloke/Llama-2-7B-GGUF/blob/main/llama-2-7b.Q4_0.gguf) model as example.
|
You can refer to the general [*Prepare and Quantize*](README.md#prepare-and-quantize) guide for model preparation, or download an already quantized model like [llama-2-7b.Q4_0.gguf](https://huggingface.co/TheBloke/Llama-2-7B-GGUF/blob/main/llama-2-7b.Q4_0.gguf) or [Meta-Llama-3-8B-Instruct-Q4_0.gguf](https://huggingface.co/aptha/Meta-Llama-3-8B-Instruct-Q4_0-GGUF/resolve/main/Meta-Llama-3-8B-Instruct-Q4_0.gguf).
|
||||||
|
|
||||||
##### Check device
|
##### Check device
|
||||||
|
|
||||||
@ -398,11 +400,15 @@ Choose one of following methods to run.
|
|||||||
|
|
||||||
```sh
|
```sh
|
||||||
./examples/sycl/run-llama2.sh 0
|
./examples/sycl/run-llama2.sh 0
|
||||||
|
# OR
|
||||||
|
./examples/sycl/run-llama3.sh 0
|
||||||
```
|
```
|
||||||
- Use multiple devices:
|
- Use multiple devices:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
./examples/sycl/run-llama2.sh
|
./examples/sycl/run-llama2.sh
|
||||||
|
# OR
|
||||||
|
./examples/sycl/run-llama3.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
2. Command line
|
2. Command line
|
||||||
@ -425,13 +431,13 @@ Examples:
|
|||||||
- Use device 0:
|
- Use device 0:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
ZES_ENABLE_SYSMAN=1 ./build/bin/llama-cli -no-cnv -m models/llama-2-7b.Q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 400 -e -ngl 33 -sm none -mg 0
|
ZES_ENABLE_SYSMAN=1 ./build/bin/llama-cli -no-cnv -m models/llama-2-7b.Q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 400 -e -ngl 99 -sm none -mg 0
|
||||||
```
|
```
|
||||||
|
|
||||||
- Use multiple devices:
|
- Use multiple devices:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
ZES_ENABLE_SYSMAN=1 ./build/bin/llama-cli -no-cnv -m models/llama-2-7b.Q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 400 -e -ngl 33 -sm layer
|
ZES_ENABLE_SYSMAN=1 ./build/bin/llama-cli -no-cnv -m models/llama-2-7b.Q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 400 -e -ngl 99 -sm layer
|
||||||
```
|
```
|
||||||
|
|
||||||
*Notes:*
|
*Notes:*
|
||||||
@ -452,7 +458,7 @@ use 1 SYCL GPUs: [0] with Max compute units:512
|
|||||||
|
|
||||||
1. Install GPU driver
|
1. Install GPU driver
|
||||||
|
|
||||||
Intel GPU drivers instructions guide and download page can be found here: [Get intel GPU Drivers](https://www.intel.com/content/www/us/en/products/docs/discrete-gpus/arc/software/drivers.html).
|
Intel GPU drivers instructions guide and download page can be found here: [Get Intel GPU Drivers](https://www.intel.com/content/www/us/en/products/docs/discrete-gpus/arc/software/drivers.html).
|
||||||
|
|
||||||
2. Install Visual Studio
|
2. Install Visual Studio
|
||||||
|
|
||||||
@ -629,7 +635,7 @@ Once it is completed, final results will be in **build/Release/bin**
|
|||||||
|
|
||||||
#### Retrieve and prepare model
|
#### Retrieve and prepare model
|
||||||
|
|
||||||
You can refer to the general [*Prepare and Quantize*](README.md#prepare-and-quantize) guide for model prepration, or simply download [llama-2-7b.Q4_0.gguf](https://huggingface.co/TheBloke/Llama-2-7B-GGUF/blob/main/llama-2-7b.Q4_0.gguf) model as example.
|
You can refer to the general [*Prepare and Quantize*](README.md#prepare-and-quantize) guide for model preparation, or download an already quantized model like [llama-2-7b.Q4_0.gguf](https://huggingface.co/TheBloke/Llama-2-7B-GGUF/blob/main/llama-2-7b.Q4_0.gguf) or [Meta-Llama-3-8B-Instruct-Q4_0.gguf](https://huggingface.co/aptha/Meta-Llama-3-8B-Instruct-Q4_0-GGUF/resolve/main/Meta-Llama-3-8B-Instruct-Q4_0.gguf).
|
||||||
|
|
||||||
##### Check device
|
##### Check device
|
||||||
|
|
||||||
@ -648,7 +654,7 @@ Similar to the native `sycl-ls`, available SYCL devices can be queried as follow
|
|||||||
build\bin\llama-ls-sycl-device.exe
|
build\bin\llama-ls-sycl-device.exe
|
||||||
```
|
```
|
||||||
|
|
||||||
This command will only display the selected backend that is supported by SYCL. The default backend is level_zero. For example, in a system with 2 *intel GPU* it would look like the following:
|
This command will only display the selected backend that is supported by SYCL. The default backend is level_zero. For example, in a system with 2 *Intel GPU* it would look like the following:
|
||||||
```
|
```
|
||||||
found 2 SYCL devices:
|
found 2 SYCL devices:
|
||||||
| | | |Compute |Max compute|Max work|Max sub| |
|
| | | |Compute |Max compute|Max work|Max sub| |
|
||||||
@ -658,13 +664,14 @@ found 2 SYCL devices:
|
|||||||
| 1|[level_zero:gpu:1]| Intel(R) UHD Graphics 770| 1.3| 32| 512| 32| 53651849216|
|
| 1|[level_zero:gpu:1]| Intel(R) UHD Graphics 770| 1.3| 32| 512| 32| 53651849216|
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Choose level-zero devices
|
#### Choose level-zero devices
|
||||||
|
|
||||||
|Chosen Device ID|Setting|
|
|Chosen Device ID|Setting|
|
||||||
|-|-|
|
|-|-|
|
||||||
|0|`set ONEAPI_DEVICE_SELECTOR="level_zero:1"` or no action|
|
|0|Default option. You may also want to `set ONEAPI_DEVICE_SELECTOR="level_zero:0"`|
|
||||||
|1|`set ONEAPI_DEVICE_SELECTOR="level_zero:1"`|
|
|1|`set ONEAPI_DEVICE_SELECTOR="level_zero:1"`|
|
||||||
|0 & 1|`set ONEAPI_DEVICE_SELECTOR="level_zero:0;level_zero:1"`|
|
|0 & 1|`set ONEAPI_DEVICE_SELECTOR="level_zero:0;level_zero:1"` or `set ONEAPI_DEVICE_SELECTOR="level_zero:*"`|
|
||||||
|
|
||||||
#### Execute
|
#### Execute
|
||||||
|
|
||||||
@ -673,7 +680,13 @@ Choose one of following methods to run.
|
|||||||
1. Script
|
1. Script
|
||||||
|
|
||||||
```
|
```
|
||||||
examples\sycl\win-run-llama2.bat
|
examples\sycl\win-run-llama-2.bat
|
||||||
|
```
|
||||||
|
|
||||||
|
or
|
||||||
|
|
||||||
|
```
|
||||||
|
examples\sycl\win-run-llama-3.bat
|
||||||
```
|
```
|
||||||
|
|
||||||
2. Command line
|
2. Command line
|
||||||
@ -697,13 +710,13 @@ Examples:
|
|||||||
- Use device 0:
|
- Use device 0:
|
||||||
|
|
||||||
```
|
```
|
||||||
build\bin\llama-cli.exe -no-cnv -m models\llama-2-7b.Q4_0.gguf -p "Building a website can be done in 10 simple steps:\nStep 1:" -n 400 -e -ngl 33 -s 0 -sm none -mg 0
|
build\bin\llama-cli.exe -no-cnv -m models\llama-2-7b.Q4_0.gguf -p "Building a website can be done in 10 simple steps:\nStep 1:" -n 400 -e -ngl 99 -sm none -mg 0
|
||||||
```
|
```
|
||||||
|
|
||||||
- Use multiple devices:
|
- Use multiple devices:
|
||||||
|
|
||||||
```
|
```
|
||||||
build\bin\llama-cli.exe -no-cnv -m models\llama-2-7b.Q4_0.gguf -p "Building a website can be done in 10 simple steps:\nStep 1:" -n 400 -e -ngl 33 -s 0 -sm layer
|
build\bin\llama-cli.exe -no-cnv -m models\llama-2-7b.Q4_0.gguf -p "Building a website can be done in 10 simple steps:\nStep 1:" -n 400 -e -ngl 99 -sm layer
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
@ -714,7 +727,9 @@ Note:
|
|||||||
```sh
|
```sh
|
||||||
detect 1 SYCL GPUs: [0] with top Max compute units:512
|
detect 1 SYCL GPUs: [0] with top Max compute units:512
|
||||||
```
|
```
|
||||||
|
|
||||||
Or
|
Or
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
use 1 SYCL GPUs: [0] with Max compute units:512
|
use 1 SYCL GPUs: [0] with Max compute units:512
|
||||||
```
|
```
|
||||||
@ -726,14 +741,17 @@ use 1 SYCL GPUs: [0] with Max compute units:512
|
|||||||
|
|
||||||
| Name | Value | Function |
|
| Name | Value | Function |
|
||||||
|--------------------|---------------------------------------|---------------------------------------------|
|
|--------------------|---------------------------------------|---------------------------------------------|
|
||||||
| GGML_SYCL | ON (mandatory) | Enable build with SYCL code path.<br>FP32 path - recommended for better perforemance than FP16 on quantized model|
|
| GGML_SYCL | ON (mandatory) | Enable build with SYCL code path. |
|
||||||
| GGML_SYCL_TARGET | INTEL *(default)* \| NVIDIA \| AMD | Set the SYCL target device type. |
|
| GGML_SYCL_TARGET | INTEL *(default)* \| NVIDIA \| AMD | Set the SYCL target device type. |
|
||||||
| GGML_SYCL_DEVICE_ARCH | Optional (except for AMD) | Set the SYCL device architecture, optional except for AMD. Setting the device architecture can improve the performance. See the table [--offload-arch](https://github.com/intel/llvm/blob/sycl/sycl/doc/design/OffloadDesign.md#--offload-arch) for a list of valid architectures. |
|
| GGML_SYCL_DEVICE_ARCH | Optional (except for AMD) | Set the SYCL device architecture, optional except for AMD. Setting the device architecture can improve the performance. See the table [--offload-arch](https://github.com/intel/llvm/blob/sycl/sycl/doc/design/OffloadDesign.md#--offload-arch) for a list of valid architectures. |
|
||||||
| GGML_SYCL_F16 | OFF *(default)* \|ON *(optional)* | Enable FP16 build with SYCL code path. |
|
| GGML_SYCL_F16 | OFF *(default)* \|ON *(optional)* | Enable FP16 build with SYCL code path. (1.) |
|
||||||
| GGML_SYCL_GRAPH | ON *(default)* \|OFF *(Optional)* | Enable build with [SYCL Graph extension](https://github.com/intel/llvm/blob/sycl/sycl/doc/extensions/experimental/sycl_ext_oneapi_graph.asciidoc). |
|
| GGML_SYCL_GRAPH | ON *(default)* \|OFF *(Optional)* | Enable build with [SYCL Graph extension](https://github.com/intel/llvm/blob/sycl/sycl/doc/extensions/experimental/sycl_ext_oneapi_graph.asciidoc). |
|
||||||
|
| GGML_SYCL_DNN | ON *(default)* \|OFF *(Optional)* | Enable build with oneDNN. |
|
||||||
| CMAKE_C_COMPILER | `icx` *(Linux)*, `icx/cl` *(Windows)* | Set `icx` compiler for SYCL code path. |
|
| CMAKE_C_COMPILER | `icx` *(Linux)*, `icx/cl` *(Windows)* | Set `icx` compiler for SYCL code path. |
|
||||||
| CMAKE_CXX_COMPILER | `icpx` *(Linux)*, `icx` *(Windows)* | Set `icpx/icx` compiler for SYCL code path. |
|
| CMAKE_CXX_COMPILER | `icpx` *(Linux)*, `icx` *(Windows)* | Set `icpx/icx` compiler for SYCL code path. |
|
||||||
|
|
||||||
|
1. FP16 is recommended for better prompt processing performance on quantized models. Performance is equivalent in text generation but set `GGML_SYCL_F16=OFF` if you are experiencing issues with FP16 builds.
|
||||||
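
As an illustrative sketch only (flag names are taken from the table above; adjust compilers and options to your environment), a Linux configure line combining the common options might look like:

```sh
# FP16 enabled per note (1.); GGML_SYCL_DEVICE_ARCH can optionally be added (mandatory for AMD targets)
cmake -B build -DGGML_SYCL=ON -DGGML_SYCL_F16=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx
cmake --build build --config Release -j
```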
|
|
||||||
#### Runtime
|
#### Runtime
|
||||||
|
|
||||||
| Name | Value | Function |
|
| Name | Value | Function |
|
||||||
@ -741,6 +759,7 @@ use 1 SYCL GPUs: [0] with Max compute units:512
|
|||||||
| GGML_SYCL_DEBUG | 0 (default) or 1 | Enable log function by macro: GGML_SYCL_DEBUG |
|
| GGML_SYCL_DEBUG | 0 (default) or 1 | Enable log function by macro: GGML_SYCL_DEBUG |
|
||||||
| GGML_SYCL_DISABLE_OPT | 0 (default) or 1 | Disable optimize features based on Intel GPU type, to compare the performance increase |
|
| GGML_SYCL_DISABLE_OPT | 0 (default) or 1 | Disable optimize features based on Intel GPU type, to compare the performance increase |
|
||||||
| GGML_SYCL_DISABLE_GRAPH | 0 or 1 (default) | Disable running computations through SYCL Graphs feature. Disabled by default because graph performance isn't yet better than non-graph performance. |
|
| GGML_SYCL_DISABLE_GRAPH | 0 or 1 (default) | Disable running computations through SYCL Graphs feature. Disabled by default because graph performance isn't yet better than non-graph performance. |
|
||||||
|
| GGML_SYCL_DISABLE_DNN | 0 (default) or 1 | Disable running computations through oneDNN and always use oneMKL. |
|
||||||
| ZES_ENABLE_SYSMAN | 0 (default) or 1 | Support to get free memory of GPU by sycl::aspect::ext_intel_free_memory.<br>Recommended to use when --split-mode = layer |
|
| ZES_ENABLE_SYSMAN | 0 (default) or 1 | Support to get free memory of GPU by sycl::aspect::ext_intel_free_memory.<br>Recommended to use when --split-mode = layer |
|
||||||
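
A hedged example of combining these variables for a single run (the model path and prompt are placeholders):

```sh
# enable debug logging, skip SYCL graphs, and report free GPU memory via SYSMAN for this run only
GGML_SYCL_DEBUG=1 GGML_SYCL_DISABLE_GRAPH=1 ZES_ENABLE_SYSMAN=1 \
  ./build/bin/llama-cli -m models/llama-2-7b.Q4_0.gguf -ngl 99 -sm layer -p "Hello"
```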
|
|
||||||
|
|
||||||
@ -750,7 +769,7 @@ use 1 SYCL GPUs: [0] with Max compute units:512
|
|||||||
|
|
||||||
## Q&A
|
## Q&A
|
||||||
|
|
||||||
- Error: `error while loading shared libraries: libsycl.so.7: cannot open shared object file: No such file or directory`.
|
- Error: `error while loading shared libraries: libsycl.so: cannot open shared object file: No such file or directory`.
|
||||||
|
|
||||||
- Potential cause: Unavailable oneAPI installation or not set ENV variables.
|
- Potential cause: Unavailable oneAPI installation or not set ENV variables.
|
||||||
- Solution: Install *oneAPI base toolkit* and enable its ENV through: `source /opt/intel/oneapi/setvars.sh`.
|
- Solution: Install *oneAPI base toolkit* and enable its ENV through: `source /opt/intel/oneapi/setvars.sh`.
|
||||||
@ -779,18 +798,18 @@ use 1 SYCL GPUs: [0] with Max compute units:512
|
|||||||
|
|
||||||
It's the same for other projects, including the llama.cpp SYCL backend.
|
It's the same for other projects, including the llama.cpp SYCL backend.
|
||||||
|
|
||||||
- Meet issue: `Native API failed. Native API returns: -6 (PI_ERROR_OUT_OF_HOST_MEMORY) -6 (PI_ERROR_OUT_OF_HOST_MEMORY) -999 (UNKNOWN PI error)` or `failed to allocate SYCL0 buffer`
|
- `Native API failed. Native API returns: 39 (UR_RESULT_ERROR_OUT_OF_DEVICE_MEMORY)`, `ggml_backend_sycl_buffer_type_alloc_buffer: can't allocate 3503030272 Bytes of memory on device`, or `failed to allocate SYCL0 buffer`
|
||||||
|
|
||||||
Device Memory is not enough.
|
You are running out of Device Memory.
|
||||||
|
|
||||||
|Reason|Solution|
|
|Reason|Solution|
|
||||||
|-|-|
|
|-|-|
|
||||||
|Default Context is too big. It leads to more memory usage.|Set `-c 8192` or smaller value.|
|
| The default context is too big. It leads to excessive memory usage.|Set `-c 8192` or a smaller value.|
|
||||||
|Model is big and require more memory than device's.|Choose smaller quantized model, like Q5 -> Q4;<br>Use more than one devices to load model.|
|
| The model is too big and requires more memory than what is available.|Choose a smaller model or change to a smaller quantization, like Q5 -> Q4;<br>Alternatively, use more than one device to load the model.|
|
||||||
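
As a sketch of the first remedy (the values and model path are examples only), shrink the context while keeping full offload:

```sh
./build/bin/llama-cli -m models/llama-2-7b.Q4_0.gguf -c 8192 -ngl 99
```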
|
|
||||||
### **GitHub contribution**:
|
### **GitHub contribution**:
|
||||||
Please add the **[SYCL]** prefix/tag in issues/PRs titles to help the SYCL-team check/address them without delay.
|
Please add the `SYCL :` prefix/tag in issues/PRs titles to help the SYCL contributors to check/address them without delay.
|
||||||
|
|
||||||
## TODO
|
## TODO
|
||||||
|
|
||||||
- NA
|
- Review ZES_ENABLE_SYSMAN: https://github.com/intel/compute-runtime/blob/master/programmers-guide/SYSMAN.md#support-and-limitations
|
||||||
|
docs/build-s390x.md (157 lines, new file)
@ -0,0 +1,157 @@
|
|||||||
|
> [!IMPORTANT]
|
||||||
|
> This build documentation is specific only to IBM Z & LinuxONE mainframes (s390x). You can find the build documentation for other architectures: [build.md](build.md).
|
||||||
|
|
||||||
|
# Build llama.cpp locally (for s390x)
|
||||||
|
|
||||||
|
The main product of this project is the `llama` library. Its C-style interface can be found in [include/llama.h](../include/llama.h).
|
||||||
|
|
||||||
|
The project also includes many example programs and tools using the `llama` library. The examples range from simple, minimal code snippets to sophisticated sub-projects such as an OpenAI-compatible HTTP server.
|
||||||
|
|
||||||
|
**To get the code:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git clone https://github.com/ggml-org/llama.cpp
|
||||||
|
cd llama.cpp
|
||||||
|
```
|
||||||
|
|
||||||
|
## CPU Build with BLAS
|
||||||
|
|
||||||
|
Building llama.cpp with BLAS support is highly recommended as it has shown to provide performance improvements.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cmake -S . -B build \
|
||||||
|
-DCMAKE_BUILD_TYPE=Release \
|
||||||
|
-DGGML_BLAS=ON \
|
||||||
|
-DGGML_BLAS_VENDOR=OpenBLAS
|
||||||
|
|
||||||
|
cmake --build build --config Release -j $(nproc)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Notes**:
|
||||||
|
- For faster repeated compilation, install [ccache](https://ccache.dev/)
|
||||||
|
- By default, VXE/VXE2 is enabled. To disable it (not recommended):
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cmake -S . -B build \
|
||||||
|
-DCMAKE_BUILD_TYPE=Release \
|
||||||
|
-DGGML_BLAS=ON \
|
||||||
|
-DGGML_BLAS_VENDOR=OpenBLAS \
|
||||||
|
-DGGML_VXE=OFF
|
||||||
|
|
||||||
|
cmake --build build --config Release -j $(nproc)
|
||||||
|
```
|
||||||
|
|
||||||
|
- For debug builds:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cmake -S . -B build \
|
||||||
|
-DCMAKE_BUILD_TYPE=Debug \
|
||||||
|
-DGGML_BLAS=ON \
|
||||||
|
-DGGML_BLAS_VENDOR=OpenBLAS
|
||||||
|
|
||||||
|
cmake --build build --config Debug -j $(nproc)
|
||||||
|
```
|
||||||
|
|
||||||
|
- For static builds, add `-DBUILD_SHARED_LIBS=OFF`:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cmake -S . -B build \
|
||||||
|
-DCMAKE_BUILD_TYPE=Release \
|
||||||
|
-DGGML_BLAS=ON \
|
||||||
|
-DGGML_BLAS_VENDOR=OpenBLAS \
|
||||||
|
-DBUILD_SHARED_LIBS=OFF
|
||||||
|
|
||||||
|
cmake --build build --config Release -j $(nproc)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Getting GGUF Models
|
||||||
|
|
||||||
|
All models need to be converted to Big-Endian. You can achieve this in three cases:
|
||||||
|
|
||||||
|
1. **Use pre-converted models verified for use on IBM Z & LinuxONE (easiest)**
|
||||||
|
|
||||||
|
You can find popular models pre-converted and verified at [s390x Ready Models](https://hf.co/collections/taronaeo/s390x-ready-models-672765393af438d0ccb72a08).
|
||||||
|
|
||||||
|
These models and their respective tokenizers are verified to run correctly on IBM Z & LinuxONE.
|
||||||
|
|
||||||
|
2. **Convert safetensors model to GGUF Big-Endian directly (recommended)**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python3 convert_hf_to_gguf.py \
|
||||||
|
--outfile model-name-be.f16.gguf \
|
||||||
|
--outtype f16 \
|
||||||
|
--bigendian \
|
||||||
|
model-directory/
|
||||||
|
```
|
||||||
|
|
||||||
|
For example,
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python3 convert_hf_to_gguf.py \
|
||||||
|
--outfile granite-3.3-2b-instruct-be.f16.gguf \
|
||||||
|
--outtype f16 \
|
||||||
|
--bigendian \
|
||||||
|
granite-3.3-2b-instruct/
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Convert existing GGUF Little-Endian model to Big-Endian**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python3 gguf-py/gguf/scripts/gguf_convert_endian.py model-name.f16.gguf BIG
|
||||||
|
```
|
||||||
|
|
||||||
|
For example,
|
||||||
|
```bash
|
||||||
|
python3 gguf-py/gguf/scripts/gguf_convert_endian.py granite-3.3-2b-instruct-le.f16.gguf BIG
|
||||||
|
mv granite-3.3-2b-instruct-le.f16.gguf granite-3.3-2b-instruct-be.f16.gguf
|
||||||
|
```
|
||||||
|
|
||||||
|
**Notes:**
|
||||||
|
- The GGUF endian conversion script may not support all data types at the moment and may fail for some models/quantizations. When that happens, please try manually converting the safetensors model to GGUF Big-Endian via Step 2.
|
||||||
|
|
||||||
|
## IBM Accelerators
|
||||||
|
|
||||||
|
### 1. SIMD Acceleration
|
||||||
|
|
||||||
|
Only available on IBM z15 or later systems with the `-DGGML_VXE=ON` (turned on by default) compile flag. No hardware acceleration is possible with llama.cpp on older systems, such as IBM z14 or EC13. On such systems, the APIs can still run but will use a scalar implementation.
|
||||||
|
|
||||||
|
### 2. zDNN Accelerator
|
||||||
|
|
||||||
|
*Only available in IBM z16 or later system. No direction at the moment.*
|
||||||
|
|
||||||
|
### 3. Spyre Accelerator
|
||||||
|
|
||||||
|
*No direction at the moment.*
|
||||||
|
|
||||||
|
## Performance Tuning
|
||||||
|
|
||||||
|
### 1. Virtualization Setup
|
||||||
|
|
||||||
|
It is strongly recommended to use only LPAR (Type-1) virtualization to get the most performance.
|
||||||
|
|
||||||
|
Note: Type-2 virtualization is not supported at the moment. While you can get it running, the performance will not be the best.
|
||||||
|
|
||||||
|
### 2. IFL (Core) Count
|
||||||
|
|
||||||
|
It is recommended to allocate a minimum of 8 shared IFLs assigned to the LPAR. Increasing the IFL count past 8 shared IFLs will only improve Prompt Processing performance but not Token Generation.
|
||||||
|
|
||||||
|
Note: IFL count does not equate to vCPU count.
|
||||||
|
|
||||||
|
### 3. SMT vs NOSMT (Simultaneous Multithreading)
|
||||||
|
|
||||||
|
It is strongly recommended to disable SMT via the kernel boot parameters as it negatively affects performance. Please refer to your Linux distribution's guide on disabling SMT via kernel boot parameters.
|
||||||
|
|
||||||
|
### 4. BLAS vs NOBLAS
|
||||||
|
|
||||||
|
IBM VXE/VXE2 SIMD acceleration depends on the BLAS implementation. It is strongly recommended to use BLAS.
|
||||||
|
|
||||||
|
## Getting Help on IBM Z & LinuxONE
|
||||||
|
|
||||||
|
1. **Bugs, Feature Requests**
|
||||||
|
|
||||||
|
Please file an issue in llama.cpp and ensure that the title contains "s390x".
|
||||||
|
|
||||||
|
2. **Other Questions**
|
||||||
|
|
||||||
|
Please reach out directly to [aionz@us.ibm.com](mailto:aionz@us.ibm.com).
|
||||||
|
|
@ -1,5 +1,9 @@
|
|||||||
# Build llama.cpp locally
|
# Build llama.cpp locally
|
||||||
|
|
||||||
|
The main product of this project is the `llama` library. Its C-style interface can be found in [include/llama.h](include/llama.h).
|
||||||
|
|
||||||
|
The project also includes many example programs and tools using the `llama` library. The examples range from simple, minimal code snippets to sophisticated sub-projects such as an OpenAI-compatible HTTP server.
|
||||||
|
|
||||||
**To get the Code:**
|
**To get the Code:**
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
@ -63,6 +67,7 @@ cmake --build build --config Release
|
|||||||
cmake --preset x64-windows-llvm-release
|
cmake --preset x64-windows-llvm-release
|
||||||
cmake --build build-x64-windows-llvm-release
|
cmake --build build-x64-windows-llvm-release
|
||||||
```
|
```
|
||||||
|
- Curl usage is enabled by default and can be turned off with `-DLLAMA_CURL=OFF`. Otherwise you need to install development libraries for libcurl.
|
||||||
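
For example, a minimal sketch of a build with libcurl support disabled:

```bash
cmake -B build -DLLAMA_CURL=OFF
cmake --build build --config Release
```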
|
|
||||||
## BLAS Build
|
## BLAS Build
|
||||||
|
|
||||||
@ -259,8 +264,6 @@ You can download it from your Linux distro's package manager or from here: [ROCm
|
|||||||
cmake -S . -B build -DGGML_HIP=ON -DAMDGPU_TARGETS=gfx1030 -DCMAKE_BUILD_TYPE=Release \
|
cmake -S . -B build -DGGML_HIP=ON -DAMDGPU_TARGETS=gfx1030 -DCMAKE_BUILD_TYPE=Release \
|
||||||
&& cmake --build build --config Release -- -j 16
|
&& cmake --build build --config Release -- -j 16
|
||||||
```
|
```
|
||||||
On Linux it is also possible to use unified memory architecture (UMA) to share main memory between the CPU and integrated GPU by setting `-DGGML_HIP_UMA=ON`.
|
|
||||||
However, this hurts performance for non-integrated GPUs (but enables working with integrated GPUs).
|
|
||||||
|
|
||||||
To enhance flash attention performance on RDNA3+ or CDNA architectures, you can utilize the rocWMMA library by enabling the `-DGGML_HIP_ROCWMMA_FATTN=ON` option. This requires rocWMMA headers to be installed on the build system.
|
To enhance flash attention performance on RDNA3+ or CDNA architectures, you can utilize the rocWMMA library by enabling the `-DGGML_HIP_ROCWMMA_FATTN=ON` option. This requires rocWMMA headers to be installed on the build system.
|
||||||
|
|
||||||
@ -296,6 +299,10 @@ You can download it from your Linux distro's package manager or from here: [ROCm
|
|||||||
The environment variable [`HIP_VISIBLE_DEVICES`](https://rocm.docs.amd.com/en/latest/understand/gpu_isolation.html#hip-visible-devices) can be used to specify which GPU(s) will be used.
|
The environment variable [`HIP_VISIBLE_DEVICES`](https://rocm.docs.amd.com/en/latest/understand/gpu_isolation.html#hip-visible-devices) can be used to specify which GPU(s) will be used.
|
||||||
If your GPU is not officially supported you can use the environment variable [`HSA_OVERRIDE_GFX_VERSION`] set to a similar GPU, for example 10.3.0 on RDNA2 (e.g. gfx1030, gfx1031, or gfx1035) or 11.0.0 on RDNA3.
|
If your GPU is not officially supported you can use the environment variable [`HSA_OVERRIDE_GFX_VERSION`] set to a similar GPU, for example 10.3.0 on RDNA2 (e.g. gfx1030, gfx1031, or gfx1035) or 11.0.0 on RDNA3.
|
||||||
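
For instance (a sketch; the model path is a placeholder and the override value comes from the paragraph above), restrict the run to the first GPU while treating a gfx1031 card as gfx1030:

```bash
HSA_OVERRIDE_GFX_VERSION=10.3.0 HIP_VISIBLE_DEVICES=0 ./build/bin/llama-cli -m model.gguf -ngl 99
```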
|
|
||||||
|
### Unified Memory
|
||||||
|
|
||||||
|
On Linux it is possible to use unified memory architecture (UMA) to share main memory between the CPU and integrated GPU by setting environment variable `GGML_CUDA_ENABLE_UNIFIED_MEMORY=1`. However, this hurts performance for non-integrated GPUs (but enables working with integrated GPUs).
|
||||||
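
A hedged one-liner for an integrated-GPU run (the model path is a placeholder):

```bash
GGML_CUDA_ENABLE_UNIFIED_MEMORY=1 ./build/bin/llama-cli -m model.gguf -ngl 99
```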
|
|
||||||
## Vulkan
|
## Vulkan
|
||||||
|
|
||||||
**Windows**
|
**Windows**
|
||||||
|
@ -9,10 +9,10 @@ Adding a model requires few steps:
|
|||||||
After following these steps, you can open PR.
|
After following these steps, you can open PR.
|
||||||
|
|
||||||
Also, it is important to check that the examples and main ggml backends (CUDA, METAL, CPU) are working with the new architecture, especially:
|
Also, it is important to check that the examples and main ggml backends (CUDA, METAL, CPU) are working with the new architecture, especially:
|
||||||
- [main](/examples/main/)
|
- [main](/tools/main/)
|
||||||
- [imatrix](/examples/imatrix/)
|
- [imatrix](/tools/imatrix/)
|
||||||
- [quantize](/examples/quantize/)
|
- [quantize](/tools/quantize/)
|
||||||
- [server](/examples/server/)
|
- [server](/tools/server/)
|
||||||
|
|
||||||
### 1. Convert the model to GGUF
|
### 1. Convert the model to GGUF
|
||||||
|
|
||||||
|
@ -22,6 +22,9 @@ Additionally, there the following images, similar to the above:
|
|||||||
- `ghcr.io/ggml-org/llama.cpp:full-musa`: Same as `full` but compiled with MUSA support. (platforms: `linux/amd64`)
|
- `ghcr.io/ggml-org/llama.cpp:full-musa`: Same as `full` but compiled with MUSA support. (platforms: `linux/amd64`)
|
||||||
- `ghcr.io/ggml-org/llama.cpp:light-musa`: Same as `light` but compiled with MUSA support. (platforms: `linux/amd64`)
|
- `ghcr.io/ggml-org/llama.cpp:light-musa`: Same as `light` but compiled with MUSA support. (platforms: `linux/amd64`)
|
||||||
- `ghcr.io/ggml-org/llama.cpp:server-musa`: Same as `server` but compiled with MUSA support. (platforms: `linux/amd64`)
|
- `ghcr.io/ggml-org/llama.cpp:server-musa`: Same as `server` but compiled with MUSA support. (platforms: `linux/amd64`)
|
||||||
|
- `ghcr.io/ggml-org/llama.cpp:full-intel`: Same as `full` but compiled with SYCL support. (platforms: `linux/amd64`)
|
||||||
|
- `ghcr.io/ggml-org/llama.cpp:light-intel`: Same as `light` but compiled with SYCL support. (platforms: `linux/amd64`)
|
||||||
|
- `ghcr.io/ggml-org/llama.cpp:server-intel`: Same as `server` but compiled with SYCL support. (platforms: `linux/amd64`)
|
||||||
|
|
||||||
The GPU enabled images are not currently tested by CI beyond being built. They are not built with any variation from the ones in the Dockerfiles defined in [.devops/](../.devops/) and the GitHub Action defined in [.github/workflows/docker.yml](../.github/workflows/docker.yml). If you need different settings (for example, a different CUDA, ROCm or MUSA library), you'll need to build the images locally for now.
|
The GPU enabled images are not currently tested by CI beyond being built. They are not built with any variation from the ones in the Dockerfiles defined in [.devops/](../.devops/) and the GitHub Action defined in [.github/workflows/docker.yml](../.github/workflows/docker.yml). If you need different settings (for example, a different CUDA, ROCm or MUSA library), you'll need to build the images locally for now.
|
||||||
|
|
||||||
@ -104,7 +107,7 @@ You may want to pass in some different `ARGS`, depending on the MUSA environment
|
|||||||
|
|
||||||
The defaults are:
|
The defaults are:
|
||||||
|
|
||||||
- `MUSA_VERSION` set to `rc3.1.1`
|
- `MUSA_VERSION` set to `rc4.0.1`
|
||||||
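
As a hypothetical sketch of overriding that default when building an image locally (the Dockerfile name under `.devops/` is an assumption; check that directory for the exact file):

```bash
# illustrative only: pass a different MUSA version as a Docker build argument
docker build -t local/llama.cpp:full-musa \
  --build-arg MUSA_VERSION=rc4.0.1 \
  -f .devops/musa.Dockerfile .
```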
|
|
||||||
The resulting images are essentially the same as the non-MUSA images:
|
The resulting images are essentially the same as the non-MUSA images:
|
||||||
|
|
||||||
|
@ -2,7 +2,6 @@
|
|||||||
|
|
||||||
[chat.h](../common/chat.h) (https://github.com/ggml-org/llama.cpp/pull/9639) adds support for [OpenAI-style function calling](https://platform.openai.com/docs/guides/function-calling) and is used in:
|
[chat.h](../common/chat.h) (https://github.com/ggml-org/llama.cpp/pull/9639) adds support for [OpenAI-style function calling](https://platform.openai.com/docs/guides/function-calling) and is used in:
|
||||||
- `llama-server` when started w/ `--jinja` flag
|
- `llama-server` when started w/ `--jinja` flag
|
||||||
- `llama-cli` (WIP: https://github.com/ggml-org/llama.cpp/pull/11556)
|
|
||||||
|
|
||||||
## Universal support w/ Native & Generic handlers
|
## Universal support w/ Native & Generic handlers
|
||||||
|
|
||||||
@ -12,7 +11,7 @@ Function calling is supported for all models (see https://github.com/ggml-org/ll
|
|||||||
- Llama 3.1 / 3.3 (including builtin tools support - tool names for `wolfram_alpha`, `web_search` / `brave_search`, `code_interpreter`), Llama 3.2
|
- Llama 3.1 / 3.3 (including builtin tools support - tool names for `wolfram_alpha`, `web_search` / `brave_search`, `code_interpreter`), Llama 3.2
|
||||||
- Functionary v3.1 / v3.2
|
- Functionary v3.1 / v3.2
|
||||||
- Hermes 2/3, Qwen 2.5
|
- Hermes 2/3, Qwen 2.5
|
||||||
- Qwen 2.5 Coder (WIP: https://github.com/ggml-org/llama.cpp/pull/12034)
|
- Qwen 2.5 Coder
|
||||||
- Mistral Nemo
|
- Mistral Nemo
|
||||||
- Firefunction v2
|
- Firefunction v2
|
||||||
- Command R7B
|
- Command R7B
|
||||||
@ -325,36 +324,65 @@ To get the official template from original HuggingFace repos, you can use [scrip
|
|||||||
> [!TIP]
|
> [!TIP]
|
||||||
> If there is no official `tool_use` Jinja template, you may want to set `--chat-template chatml` to use a default that works with many models (YMMV!), or write your own (e.g. we provide a custom [llama-cpp-deepseek-r1.jinja](../models/templates/llama-cpp-deepseek-r1.jinja) for DeepSeek R1 distills)
|
> If there is no official `tool_use` Jinja template, you may want to set `--chat-template chatml` to use a default that works with many models (YMMV!), or write your own (e.g. we provide a custom [llama-cpp-deepseek-r1.jinja](../models/templates/llama-cpp-deepseek-r1.jinja) for DeepSeek R1 distills)
|
||||||
|
|
||||||
|
> [!CAUTION]
|
||||||
|
> Beware of extreme KV quantizations (e.g. `-ctk q4_0`), they can substantially degrade the model's tool calling performance.
|
||||||
|
|
||||||
Test in CLI (or with any library / software that can use OpenAI-compatible API backends):
|
Test in CLI (or with any library / software that can use OpenAI-compatible API backends):
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
curl http://localhost:8080/v1/chat/completions -d '{
|
curl http://localhost:8080/v1/chat/completions -d '{
|
||||||
"model": "gpt-3.5-turbo",
|
"model": "gpt-3.5-turbo",
|
||||||
"tools": [
|
"tools": [
|
||||||
{
|
{
|
||||||
"type":"function",
|
"type":"function",
|
||||||
"function":{
|
"function":{
|
||||||
"name":"python",
|
"name":"python",
|
||||||
"description":"Runs code in an ipython interpreter and returns the result of the execution after 60 seconds.",
|
"description":"Runs code in an ipython interpreter and returns the result of the execution after 60 seconds.",
|
||||||
"parameters":{
|
"parameters":{
|
||||||
"type":"object",
|
"type":"object",
|
||||||
"properties":{
|
"properties":{
|
||||||
"code":{
|
"code":{
|
||||||
"type":"string",
|
"type":"string",
|
||||||
"description":"The code to run in the ipython interpreter."
|
"description":"The code to run in the ipython interpreter."
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required":["code"]
|
||||||
}
|
}
|
||||||
},
|
|
||||||
"required":["code"]
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
],
|
||||||
],
|
"messages": [
|
||||||
"messages": [
|
{
|
||||||
{
|
"role": "user",
|
||||||
"role": "user",
|
"content": "Print a hello world message with python."
|
||||||
"content": "Print a hello world message with python."
|
}
|
||||||
}
|
]
|
||||||
]
|
}'
|
||||||
|
|
||||||
|
|
||||||
|
curl http://localhost:8080/v1/chat/completions -d '{
|
||||||
|
"model": "gpt-3.5-turbo",
|
||||||
|
"messages": [
|
||||||
|
{"role": "system", "content": "You are a chatbot that uses tools/functions. Dont overthink things."},
|
||||||
|
{"role": "user", "content": "What is the weather in Istanbul?"}
|
||||||
|
],
|
||||||
|
"tools": [{
|
||||||
|
"type":"function",
|
||||||
|
"function":{
|
||||||
|
"name":"get_current_weather",
|
||||||
|
"description":"Get the current weather in a given location",
|
||||||
|
"parameters":{
|
||||||
|
"type":"object",
|
||||||
|
"properties":{
|
||||||
|
"location":{
|
||||||
|
"type":"string",
|
||||||
|
"description":"The city and country/state, e.g. `San Francisco, CA`, or `Paris, France`"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required":["location"]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}]
|
||||||
}'
|
}'
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -1,28 +1,42 @@
|
|||||||
# Install pre-built version of llama.cpp
|
# Install pre-built version of llama.cpp
|
||||||
|
|
||||||
## Homebrew
|
| Install via | Windows | Mac | Linux |
|
||||||
|
|-------------|---------|-----|-------|
|
||||||
|
| Winget | ✅ | | |
|
||||||
|
| Homebrew | | ✅ | ✅ |
|
||||||
|
| MacPorts | | ✅ | |
|
||||||
|
| Nix | | ✅ | ✅ |
|
||||||
|
|
||||||
On Mac and Linux, the homebrew package manager can be used via
|
## Winget (Windows)
|
||||||
|
|
||||||
|
```sh
|
||||||
|
winget install llama.cpp
|
||||||
|
```
|
||||||
|
|
||||||
|
The package is automatically updated with new `llama.cpp` releases. More info: https://github.com/ggml-org/llama.cpp/issues/8188
|
||||||
|
|
||||||
|
## Homebrew (Mac and Linux)
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
brew install llama.cpp
|
brew install llama.cpp
|
||||||
```
|
```
|
||||||
|
|
||||||
The formula is automatically updated with new `llama.cpp` releases. More info: https://github.com/ggml-org/llama.cpp/discussions/7668
|
The formula is automatically updated with new `llama.cpp` releases. More info: https://github.com/ggml-org/llama.cpp/discussions/7668
|
||||||
|
|
||||||
## MacPorts
|
## MacPorts (Mac)
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
sudo port install llama.cpp
|
sudo port install llama.cpp
|
||||||
```
|
```
|
||||||
see also: https://ports.macports.org/port/llama.cpp/details/
|
|
||||||
|
|
||||||
## Nix
|
See also: https://ports.macports.org/port/llama.cpp/details/
|
||||||
|
|
||||||
On Mac and Linux, the Nix package manager can be used via
|
## Nix (Mac and Linux)
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
nix profile install nixpkgs#llama-cpp
|
nix profile install nixpkgs#llama-cpp
|
||||||
```
|
```
|
||||||
|
|
||||||
For flake enabled installs.
|
For flake enabled installs.
|
||||||
|
|
||||||
Or
|
Or
|
||||||
@ -34,13 +48,3 @@ nix-env --file '<nixpkgs>' --install --attr llama-cpp
|
|||||||
For non-flake enabled installs.
|
For non-flake enabled installs.
|
||||||
|
|
||||||
This expression is automatically updated within the [nixpkgs repo](https://github.com/NixOS/nixpkgs/blob/nixos-24.05/pkgs/by-name/ll/llama-cpp/package.nix#L164).
|
This expression is automatically updated within the [nixpkgs repo](https://github.com/NixOS/nixpkgs/blob/nixos-24.05/pkgs/by-name/ll/llama-cpp/package.nix#L164).
|
||||||
|
|
||||||
## Flox
|
|
||||||
|
|
||||||
On Mac and Linux, Flox can be used to install llama.cpp within a Flox environment via
|
|
||||||
|
|
||||||
```sh
|
|
||||||
flox install llama-cpp
|
|
||||||
```
|
|
||||||
|
|
||||||
Flox follows the nixpkgs build of llama.cpp.
|
|
||||||
|
docs/multimodal.md (113 lines, new file)
@ -0,0 +1,113 @@
|
|||||||
|
# Multimodal
|
||||||
|
|
||||||
|
llama.cpp supports multimodal input via `libmtmd`. Currently, there are 2 tools that support this feature:
|
||||||
|
- [llama-mtmd-cli](../tools/mtmd/README.md)
|
||||||
|
- [llama-server](../tools/server/README.md) via OpenAI-compatible `/chat/completions` API
|
||||||
|
|
||||||
|
Currently, we support **image** and **audio** input. Audio is highly experimental and may have reduced quality.
|
||||||
|
|
||||||
|
To enable it, you can use one of the 2 methods below:
|
||||||
|
|
||||||
|
- Use the `-hf` option with a supported model (see the list of pre-quantized models below)
|
||||||
|
- To load a model using `-hf` while disabling multimodal, use `--no-mmproj`
|
||||||
|
- To load a model using `-hf` while using a custom mmproj file, use `--mmproj local_file.gguf`
|
||||||
|
- Use `-m model.gguf` option with `--mmproj file.gguf` to specify text and multimodal projector respectively
|
||||||
|
|
||||||
|
By default, the multimodal projector will be offloaded to the GPU. To disable this, add `--no-mmproj-offload`
|
||||||
|
|
||||||
|
For example:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
# simple usage with CLI
|
||||||
|
llama-mtmd-cli -hf ggml-org/gemma-3-4b-it-GGUF
|
||||||
|
|
||||||
|
# simple usage with server
|
||||||
|
llama-server -hf ggml-org/gemma-3-4b-it-GGUF
|
||||||
|
|
||||||
|
# using local file
|
||||||
|
llama-server -m gemma-3-4b-it-Q4_K_M.gguf --mmproj mmproj-gemma-3-4b-it-Q4_K_M.gguf
|
||||||
|
|
||||||
|
# no GPU offload
|
||||||
|
llama-server -hf ggml-org/gemma-3-4b-it-GGUF --no-mmproj-offload
|
||||||
|
```
|
||||||
|
|
||||||
|
## Pre-quantized models
|
||||||
|
|
||||||
|
These are ready-to-use models; most of them come with `Q4_K_M` quantization by default. They can be found on the Hugging Face page of ggml-org: https://huggingface.co/collections/ggml-org/multimodal-ggufs-68244e01ff1f39e5bebeeedc
|
||||||
|
|
||||||
|
Replace `(tool_name)` with the name of the binary you want to use, for example `llama-mtmd-cli` or `llama-server`.
|
||||||
|
|
||||||
|
NOTE: some models may require a large context window, for example `-c 8192`
|
||||||
|
|
||||||
|
**Vision models**:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
# Gemma 3
|
||||||
|
(tool_name) -hf ggml-org/gemma-3-4b-it-GGUF
|
||||||
|
(tool_name) -hf ggml-org/gemma-3-12b-it-GGUF
|
||||||
|
(tool_name) -hf ggml-org/gemma-3-27b-it-GGUF
|
||||||
|
|
||||||
|
# SmolVLM
|
||||||
|
(tool_name) -hf ggml-org/SmolVLM-Instruct-GGUF
|
||||||
|
(tool_name) -hf ggml-org/SmolVLM-256M-Instruct-GGUF
|
||||||
|
(tool_name) -hf ggml-org/SmolVLM-500M-Instruct-GGUF
|
||||||
|
(tool_name) -hf ggml-org/SmolVLM2-2.2B-Instruct-GGUF
|
||||||
|
(tool_name) -hf ggml-org/SmolVLM2-256M-Video-Instruct-GGUF
|
||||||
|
(tool_name) -hf ggml-org/SmolVLM2-500M-Video-Instruct-GGUF
|
||||||
|
|
||||||
|
# Pixtral 12B
|
||||||
|
(tool_name) -hf ggml-org/pixtral-12b-GGUF
|
||||||
|
|
||||||
|
# Qwen 2 VL
|
||||||
|
(tool_name) -hf ggml-org/Qwen2-VL-2B-Instruct-GGUF
|
||||||
|
(tool_name) -hf ggml-org/Qwen2-VL-7B-Instruct-GGUF
|
||||||
|
|
||||||
|
# Qwen 2.5 VL
|
||||||
|
(tool_name) -hf ggml-org/Qwen2.5-VL-3B-Instruct-GGUF
|
||||||
|
(tool_name) -hf ggml-org/Qwen2.5-VL-7B-Instruct-GGUF
|
||||||
|
(tool_name) -hf ggml-org/Qwen2.5-VL-32B-Instruct-GGUF
|
||||||
|
(tool_name) -hf ggml-org/Qwen2.5-VL-72B-Instruct-GGUF
|
||||||
|
|
||||||
|
# Mistral Small 3.1 24B (IQ2_M quantization)
|
||||||
|
(tool_name) -hf ggml-org/Mistral-Small-3.1-24B-Instruct-2503-GGUF
|
||||||
|
|
||||||
|
# InternVL 2.5 and 3
|
||||||
|
(tool_name) -hf ggml-org/InternVL2_5-1B-GGUF
|
||||||
|
(tool_name) -hf ggml-org/InternVL2_5-4B-GGUF
|
||||||
|
(tool_name) -hf ggml-org/InternVL3-1B-Instruct-GGUF
|
||||||
|
(tool_name) -hf ggml-org/InternVL3-2B-Instruct-GGUF
|
||||||
|
(tool_name) -hf ggml-org/InternVL3-8B-Instruct-GGUF
|
||||||
|
(tool_name) -hf ggml-org/InternVL3-14B-Instruct-GGUF
|
||||||
|
|
||||||
|
# Llama 4 Scout
|
||||||
|
(tool_name) -hf ggml-org/Llama-4-Scout-17B-16E-Instruct-GGUF
|
||||||
|
|
||||||
|
# Moondream2 20250414 version
|
||||||
|
(tool_name) -hf ggml-org/moondream2-20250414-GGUF
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
**Audio models**:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
# Ultravox 0.5
|
||||||
|
(tool_name) -hf ggml-org/ultravox-v0_5-llama-3_2-1b-GGUF
|
||||||
|
(tool_name) -hf ggml-org/ultravox-v0_5-llama-3_1-8b-GGUF
|
||||||
|
|
||||||
|
# Qwen2-Audio and SeaLLM-Audio
|
||||||
|
# note: no pre-quantized GGUF for this model, as the results are very poor
|
||||||
|
# ref: https://github.com/ggml-org/llama.cpp/pull/13760
|
||||||
|
```
|
||||||
|
|
||||||
|
**Mixed modalities**:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
# Qwen2.5 Omni
|
||||||
|
# Capabilities: audio input, vision input
|
||||||
|
(tool_name) -hf ggml-org/Qwen2.5-Omni-3B-GGUF
|
||||||
|
(tool_name) -hf ggml-org/Qwen2.5-Omni-7B-GGUF
|
||||||
|
```
|
||||||
|
|
||||||
|
## Finding more models:
|
||||||
|
|
||||||
|
GGUF models on Huggingface with vision capabilities can be found here: https://huggingface.co/models?pipeline_tag=image-text-to-text&sort=trending&search=gguf
|
@ -9,15 +9,15 @@ The implementation is based on llava, and is compatible with llava and mobileVLM
|
|||||||
Notice: The overall process of model inference for both **MobileVLM** and **MobileVLM_V2** models is the same, but the process of model conversion is a little different. Therefore, using **MobileVLM-1.7B** as an example, the differing conversion steps will be shown.
|
Notice: The overall process of model inference for both **MobileVLM** and **MobileVLM_V2** models is the same, but the process of model conversion is a little different. Therefore, using **MobileVLM-1.7B** as an example, the differing conversion steps will be shown.
|
||||||
|
|
||||||
## Usage
|
## Usage
|
||||||
Build with cmake or run `make llama-llava-cli` to build it.
|
|
||||||
|
|
||||||
After building, run: `./llama-llava-cli` to see the usage. For example:
|
Build the `llama-mtmd-cli` binary.
|
||||||
|
|
||||||
|
After building, run: `./llama-mtmd-cli` to see the usage. For example:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
./llama-llava-cli -m MobileVLM-1.7B/ggml-model-q4_k.gguf \
|
./llama-mtmd-cli -m MobileVLM-1.7B/ggml-model-q4_k.gguf \
|
||||||
--mmproj MobileVLM-1.7B/mmproj-model-f16.gguf \
|
--mmproj MobileVLM-1.7B/mmproj-model-f16.gguf \
|
||||||
--image path/to/an/image.jpg \
|
--chat-template deepseek
|
||||||
-p "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: <image>\nWho is the author of this book? Answer the question using a single word or phrase. ASSISTANT:"
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## Model conversion
|
## Model conversion
|
||||||
@ -33,13 +33,13 @@ git clone https://huggingface.co/openai/clip-vit-large-patch14-336
|
|||||||
2. Use `llava_surgery.py` to split the LLaVA model to LLaMA and multimodel projector constituents:
|
2. Use `llava_surgery.py` to split the LLaVA model to LLaMA and multimodel projector constituents:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
python ./examples/llava/llava_surgery.py -m path/to/MobileVLM-1.7B
|
python ./tools/mtmd/llava_surgery.py -m path/to/MobileVLM-1.7B
|
||||||
```
|
```
|
||||||
|
|
||||||
3. Use `convert_image_encoder_to_gguf.py` with `--projector-type ldp` (for **V2** please use `--projector-type ldpv2`) to convert the LLaVA image encoder to GGUF:
|
3. Use `convert_image_encoder_to_gguf.py` with `--projector-type ldp` (for **V2** please use `--projector-type ldpv2`) to convert the LLaVA image encoder to GGUF:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
python ./examples/llava/convert_image_encoder_to_gguf.py \
|
python ./tools/mtmd/convert_image_encoder_to_gguf.py \
|
||||||
-m path/to/clip-vit-large-patch14-336 \
|
-m path/to/clip-vit-large-patch14-336 \
|
||||||
--llava-projector path/to/MobileVLM-1.7B/llava.projector \
|
--llava-projector path/to/MobileVLM-1.7B/llava.projector \
|
||||||
--output-dir path/to/MobileVLM-1.7B \
|
--output-dir path/to/MobileVLM-1.7B \
|
||||||
@ -47,7 +47,7 @@ python ./examples/llava/convert_image_encoder_to_gguf.py \
|
|||||||
```
|
```
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
python ./examples/llava/convert_image_encoder_to_gguf.py \
|
python ./tools/mtmd/convert_image_encoder_to_gguf.py \
|
||||||
-m path/to/clip-vit-large-patch14-336 \
|
-m path/to/clip-vit-large-patch14-336 \
|
||||||
--llava-projector path/to/MobileVLM-1.7B_V2/llava.projector \
|
--llava-projector path/to/MobileVLM-1.7B_V2/llava.projector \
|
||||||
--output-dir path/to/MobileVLM-1.7B_V2 \
|
--output-dir path/to/MobileVLM-1.7B_V2 \
|
||||||
@ -69,10 +69,10 @@ Now both the LLaMA part and the image encoder is in the `MobileVLM-1.7B` directo
|
|||||||
|
|
||||||
## Android compile and run
|
## Android compile and run
|
||||||
### compile
|
### compile
|
||||||
refer to `examples/llava/android/build_64.sh`
|
refer to `tools/mtmd/android/build_64.sh`
|
||||||
```sh
|
```sh
|
||||||
mkdir examples/llava/android/build_64
|
mkdir tools/mtmd/android/build_64
|
||||||
cd examples/llava/android/build_64
|
cd tools/mtmd/android/build_64
|
||||||
../build_64.sh
|
../build_64.sh
|
||||||
```
|
```
|
||||||
### run on Android
|
### run on Android
|
||||||
@ -82,7 +82,7 @@ refer to `android/adb_run.sh`, modify resources' `name` and `path`
|
|||||||
### case 1
|
### case 1
|
||||||
**input**
|
**input**
|
||||||
```sh
|
```sh
|
||||||
/data/local/tmp/llama-llava-cli \
|
/data/local/tmp/llama-mtmd-cli \
|
||||||
-m /data/local/tmp/ggml-model-q4_k.gguf \
|
-m /data/local/tmp/ggml-model-q4_k.gguf \
|
||||||
--mmproj /data/local/tmp/mmproj-model-f16.gguf \
|
--mmproj /data/local/tmp/mmproj-model-f16.gguf \
|
||||||
-t 4 \
|
-t 4 \
|
||||||
@ -102,7 +102,7 @@ llama_print_timings: total time = 34731.93 ms
|
|||||||
### case 2
|
### case 2
|
||||||
**input**
|
**input**
|
||||||
```sh
|
```sh
|
||||||
/data/local/tmp/llama-llava-cli \
|
/data/local/tmp/llama-mtmd-cli \
|
||||||
-m /data/local/tmp/ggml-model-q4_k.gguf \
|
-m /data/local/tmp/ggml-model-q4_k.gguf \
|
||||||
--mmproj /data/local/tmp/mmproj-model-f16.gguf \
|
--mmproj /data/local/tmp/mmproj-model-f16.gguf \
|
||||||
-t 4 \
|
-t 4 \
|
||||||
@ -123,10 +123,10 @@ llama_print_timings: total time = 34570.79 ms
|
|||||||
|
|
||||||
## Some result on Android with `Snapdragon 778G` chip
|
## Some result on Android with `Snapdragon 778G` chip
|
||||||
### MobileVLM-1.7B case
|
### MobileVLM-1.7B case
|
||||||
#### llava-cli release-b2005
|
#### mtmd-cli release-b2005
|
||||||
**input**
|
**input**
|
||||||
```sh
|
```sh
|
||||||
/data/local/tmp/llama-llava-cli \
|
/data/local/tmp/llama-mtmd-cli \
|
||||||
-m /data/local/tmp/ggml-model-q4_k.gguf \
|
-m /data/local/tmp/ggml-model-q4_k.gguf \
|
||||||
--mmproj /data/local/tmp/mmproj-model-f16.gguf \
|
--mmproj /data/local/tmp/mmproj-model-f16.gguf \
|
||||||
-t 4 \
|
-t 4 \
|
||||||
@ -147,7 +147,7 @@ llama_print_timings: prompt eval time = 8119.49 ms / 191 tokens ( 42.51 m
|
|||||||
llama_print_timings: eval time = 1005.75 ms / 14 runs ( 71.84 ms per token, 13.92 tokens per second)
|
llama_print_timings: eval time = 1005.75 ms / 14 runs ( 71.84 ms per token, 13.92 tokens per second)
|
||||||
llama_print_timings: total time = 28038.34 ms / 205 tokens
|
llama_print_timings: total time = 28038.34 ms / 205 tokens
|
||||||
```
|
```
|
||||||
#### llava-cli latest-version
|
#### mtmd-cli latest-version
|
||||||
**input**
|
**input**
|
||||||
|
|
||||||
Just the same as above.
|
Just the same as above.
|
||||||
@ -169,7 +169,7 @@ llama_print_timings: eval time = 43894.02 ms / 13 runs ( 3376.46 m
|
|||||||
llama_print_timings: total time = 865441.76 ms / 204 tokens
|
llama_print_timings: total time = 865441.76 ms / 204 tokens
|
||||||
```
|
```
|
||||||
### MobileVLM_V2-1.7B case
|
### MobileVLM_V2-1.7B case
|
||||||
#### llava-cli release-2005b
|
#### mtmd-cli release-2005b
|
||||||
**input**
|
**input**
|
||||||
|
|
||||||
Just the same as above.
|
Just the same as above.
|
||||||
@ -200,7 +200,7 @@ make GGML_CUDA=1 CUDA_DOCKER_ARCH=sm_87 GGML_CUDA_F16=1 -j 32
|
|||||||
### case 1
|
### case 1
|
||||||
**input**
|
**input**
|
||||||
```sh
|
```sh
|
||||||
./llama-llava-cli \
|
./llama-mtmd-cli \
|
||||||
-m /data/local/tmp/ggml-model-q4_k.gguf \
|
-m /data/local/tmp/ggml-model-q4_k.gguf \
|
||||||
--mmproj /data/local/tmp/mmproj-model-f16.gguf \
|
--mmproj /data/local/tmp/mmproj-model-f16.gguf \
|
||||||
--image /data/local/tmp/demo.jpeg \
|
--image /data/local/tmp/demo.jpeg \
|
||||||
@ -224,7 +224,7 @@ llama_print_timings: total time = 1352.63 ms / 252 tokens
|
|||||||
### case 2
|
### case 2
|
||||||
**input**
|
**input**
|
||||||
```sh
|
```sh
|
||||||
./llama-llava-cli \
|
./llama-mtmd-cli \
|
||||||
-m /data/local/tmp/ggml-model-q4_k.gguf \
|
-m /data/local/tmp/ggml-model-q4_k.gguf \
|
||||||
--mmproj /data/local/tmp/mmproj-model-f16.gguf \
|
--mmproj /data/local/tmp/mmproj-model-f16.gguf \
|
||||||
-p "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: <image>\nWhat is in the image? ASSISTANT:" \
|
-p "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: <image>\nWhat is in the image? ASSISTANT:" \
|
@ -11,26 +11,27 @@ You can use pre-quantized model from [ggml-org](https://huggingface.co/ggml-org)
|
|||||||
```bash
|
```bash
|
||||||
# build
|
# build
|
||||||
cmake -B build
|
cmake -B build
|
||||||
cmake --build build --target llama-gemma3-cli
|
cmake --build build --target llama-mtmd-cli
|
||||||
|
|
||||||
# alternatively, install from brew (MacOS)
|
# alternatively, install from brew (MacOS)
|
||||||
brew install llama.cpp
|
brew install llama.cpp
|
||||||
|
|
||||||
# run it
|
# run it
|
||||||
llama-gemma3-cli -hf ggml-org/gemma-3-4b-it-GGUF
|
llama-mtmd-cli -hf ggml-org/gemma-3-4b-it-GGUF
|
||||||
llama-gemma3-cli -hf ggml-org/gemma-3-12b-it-GGUF
|
llama-mtmd-cli -hf ggml-org/gemma-3-12b-it-GGUF
|
||||||
llama-gemma3-cli -hf ggml-org/gemma-3-27b-it-GGUF
|
llama-mtmd-cli -hf ggml-org/gemma-3-27b-it-GGUF
|
||||||
|
|
||||||
# note: 1B model does not support vision
|
# note: 1B model does not support vision
|
||||||
```
|
```
|
||||||
|
|
||||||
## How to get mmproj.gguf?
|
## How to get mmproj.gguf?
|
||||||
|
|
||||||
|
Simply add `--mmproj` when converting the model via `convert_hf_to_gguf.py`:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
cd gemma-3-4b-it
|
cd gemma-3-4b-it
|
||||||
python ../llama.cpp/examples/llava/gemma3_convert_encoder_to_gguf.py .
|
python ../llama.cpp/convert_hf_to_gguf.py --outfile model.gguf --outtype f16 --mmproj .
|
||||||
|
# output file: mmproj-model.gguf
|
||||||
# output file is mmproj.gguf
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## How to run it?
|
## How to run it?
|
||||||
@ -43,8 +44,8 @@ What you need:
|
|||||||
```bash
|
```bash
|
||||||
# build
|
# build
|
||||||
cmake -B build
|
cmake -B build
|
||||||
cmake --build build --target llama-gemma3-cli
|
cmake --build build --target llama-mtmd-cli
|
||||||
|
|
||||||
# run it
|
# run it
|
||||||
./build/bin/llama-gemma3-cli -m {text_model}.gguf --mmproj mmproj.gguf --image your_image.jpg
|
./build/bin/llama-mtmd-cli -m {text_model}.gguf --mmproj mmproj.gguf --image your_image.jpg
|
||||||
```
|
```
|
@ -3,12 +3,12 @@
|
|||||||
Currently this implementation supports [glm-edge-v-2b](https://huggingface.co/THUDM/glm-edge-v-2b) and [glm-edge-v-5b](https://huggingface.co/THUDM/glm-edge-v-5b).
|
Currently this implementation supports [glm-edge-v-2b](https://huggingface.co/THUDM/glm-edge-v-2b) and [glm-edge-v-5b](https://huggingface.co/THUDM/glm-edge-v-5b).
|
||||||
|
|
||||||
## Usage
|
## Usage
|
||||||
Build with cmake or run `make llama-llava-cli` to build it.
|
Build the `llama-mtmd-cli` binary.
|
||||||
|
|
||||||
After building, run: `./llama-llava-cli` to see the usage. For example:
|
After building, run: `./llama-mtmd-cli` to see the usage. For example:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
./llama-llava-cli -m model_path/ggml-model-f16.gguf --mmproj model_path/mmproj-model-f16.gguf --image img_path/image.jpg -p "<|system|>\n system prompt <image><|user|>\n prompt <|assistant|>\n"
|
./llama-mtmd-cli -m model_path/ggml-model-f16.gguf --mmproj model_path/mmproj-model-f16.gguf
|
||||||
```
|
```
|
||||||
|
|
||||||
**note**: A lower temperature like 0.1 is recommended for better quality. Add `--temp 0.1` to the command to do so.
|
**note**: A lower temperature like 0.1 is recommended for better quality. Add `--temp 0.1` to the command to do so.
|
||||||
@ -25,13 +25,13 @@ git clone https://huggingface.co/THUDM/glm-edge-v-5b or https://huggingface.co/T
|
|||||||
2. Use `glmedge-surgery.py` to split the GLMV-EDGE model to LLM and multimodel projector constituents:
|
2. Use `glmedge-surgery.py` to split the GLMV-EDGE model to LLM and multimodel projector constituents:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
python ./examples/llava/glmedge-surgery.py -m ../model_path
|
python ./tools/mtmd/glmedge-surgery.py -m ../model_path
|
||||||
```
|
```
|
||||||
|
|
||||||
4. Use `glmedge-convert-image-encoder-to-gguf.py` to convert the GLMV-EDGE image encoder to GGUF:
|
4. Use `glmedge-convert-image-encoder-to-gguf.py` to convert the GLMV-EDGE image encoder to GGUF:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
python ./examples/llava/glmedge-convert-image-encoder-to-gguf.py -m ../model_path --llava-projector ../model_path/glm.projector --output-dir ../model_path
|
python ./tools/mtmd/glmedge-convert-image-encoder-to-gguf.py -m ../model_path --llava-projector ../model_path/glm.projector --output-dir ../model_path
|
||||||
```
|
```
|
||||||
|
|
||||||
5. Use `examples/convert_hf_to_gguf.py` to convert the LLM part of GLMV-EDGE to GGUF:
|
5. Use `examples/convert_hf_to_gguf.py` to convert the LLM part of GLMV-EDGE to GGUF:
|
@ -176,15 +176,11 @@ Note that currently you cannot quantize the visual encoder because granite visio
|
|||||||
|
|
||||||
|
|
||||||
### 5. Running the Model in Llama cpp
|
### 5. Running the Model in Llama cpp
|
||||||
Build llama cpp normally; you should have a target binary named `llama-llava-cli`, which you can pass two binaries to. As an example, we pass the the llama.cpp banner.
|
Build llama.cpp normally; you should have a target binary named `llama-mtmd-cli`, which you can pass two binaries to. As an example, we pass the llama.cpp banner.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
$ ./build/bin/llama-llava-cli -m $LLM_GGUF_PATH \
|
$ ./build/bin/llama-mtmd-cli -m $LLM_GGUF_PATH \
|
||||||
--mmproj $VISUAL_GGUF_PATH \
|
--mmproj $VISUAL_GGUF_PATH \
|
||||||
--image ./media/llama0-banner.png \
|
|
||||||
-c 16384 \
|
-c 16384 \
|
||||||
-p "<|system|>\nA chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.\n<|user|>\n\<image>\nWhat does the text in this image say?\n<|assistant|>\n" \
|
|
||||||
--temp 0
|
--temp 0
|
||||||
```
|
```
|
||||||
|
|
||||||
Sample output: `The text in the image reads "LLAMA C++ Can it run DOOM Llama?"`
|
|
@ -11,12 +11,14 @@ For llava-1.6 a variety of prepared gguf models are available as well [7b-34b](h
|
|||||||
After API is confirmed, more models will be supported / uploaded.
|
After API is confirmed, more models will be supported / uploaded.
|
||||||
|
|
||||||
## Usage
|
## Usage
|
||||||
Build with cmake or run `make llama-llava-cli` to build it.
|
Build the `llama-mtmd-cli` binary.
|
||||||
|
|
||||||
After building, run: `./llama-llava-cli` to see the usage. For example:
|
After building, run: `./llama-mtmd-cli` to see the usage. For example:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
./llama-llava-cli -m ../llava-v1.5-7b/ggml-model-f16.gguf --mmproj ../llava-v1.5-7b/mmproj-model-f16.gguf --image path/to/an/image.jpg
|
./llama-mtmd-cli -m ../llava-v1.5-7b/ggml-model-f16.gguf \
|
||||||
|
--mmproj ../llava-v1.5-7b/mmproj-model-f16.gguf \
|
||||||
|
--chat-template vicuna
|
||||||
```
|
```
|
||||||
|
|
||||||
**note**: A lower temperature like 0.1 is recommended for better quality. Add `--temp 0.1` to the command to do so.
|
**note**: A lower temperature like 0.1 is recommended for better quality. Add `--temp 0.1` to the command to do so.
|
||||||
@ -35,19 +37,19 @@ git clone https://huggingface.co/openai/clip-vit-large-patch14-336
|
|||||||
2. Install the required Python packages:
|
2. Install the required Python packages:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
pip install -r examples/llava/requirements.txt
|
pip install -r tools/mtmd/requirements.txt
|
||||||
```
|
```
|
||||||
|
|
||||||
3. Use `llava_surgery.py` to split the LLaVA model to LLaMA and multimodel projector constituents:
|
3. Use `llava_surgery.py` to split the LLaVA model to LLaMA and multimodel projector constituents:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
python ./examples/llava/llava_surgery.py -m ../llava-v1.5-7b
|
python ./tools/mtmd/llava_surgery.py -m ../llava-v1.5-7b
|
||||||
```
|
```
|
||||||
|
|
||||||
4. Use `convert_image_encoder_to_gguf.py` to convert the LLaVA image encoder to GGUF:
|
4. Use `convert_image_encoder_to_gguf.py` to convert the LLaVA image encoder to GGUF:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
python ./examples/llava/convert_image_encoder_to_gguf.py -m ../clip-vit-large-patch14-336 --llava-projector ../llava-v1.5-7b/llava.projector --output-dir ../llava-v1.5-7b
|
python ./tools/mtmd/convert_image_encoder_to_gguf.py -m ../clip-vit-large-patch14-336 --llava-projector ../llava-v1.5-7b/llava.projector --output-dir ../llava-v1.5-7b
|
||||||
```
|
```
|
||||||
|
|
||||||
5. Use `examples/convert_legacy_llama.py` to convert the LLaMA part of LLaVA to GGUF:
|
5. Use `examples/convert_legacy_llama.py` to convert the LLaMA part of LLaVA to GGUF:
|
||||||
@ -67,12 +69,12 @@ git clone https://huggingface.co/liuhaotian/llava-v1.6-vicuna-7b
|
|||||||
2) Install the required Python packages:
|
2) Install the required Python packages:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
pip install -r examples/llava/requirements.txt
|
pip install -r tools/mtmd/requirements.txt
|
||||||
```
|
```
|
||||||
|
|
||||||
3) Use `llava_surgery_v2.py` which also supports llava-1.5 variants pytorch as well as safetensor models:
|
3) Use `llava_surgery_v2.py` which also supports llava-1.5 variants pytorch as well as safetensor models:
|
||||||
```console
|
```console
|
||||||
python examples/llava/llava_surgery_v2.py -C -m ../llava-v1.6-vicuna-7b/
|
python tools/mtmd/llava_surgery_v2.py -C -m ../llava-v1.6-vicuna-7b/
|
||||||
```
|
```
|
||||||
- you will find a llava.projector and a llava.clip file in your model directory
|
- you will find a llava.projector and a llava.clip file in your model directory
|
||||||
|
|
||||||
@ -86,7 +88,7 @@ curl -s -q https://huggingface.co/cmp-nct/llava-1.6-gguf/raw/main/config_vit.jso
|
|||||||
|
|
||||||
5) Create the visual gguf model:
|
5) Create the visual gguf model:
|
||||||
```console
|
```console
|
||||||
python ./examples/llava/convert_image_encoder_to_gguf.py -m vit --llava-projector vit/llava.projector --output-dir vit --clip-model-is-vision
|
python ./tools/mtmd/convert_image_encoder_to_gguf.py -m vit --llava-projector vit/llava.projector --output-dir vit --clip-model-is-vision
|
||||||
```
|
```
|
||||||
- This is similar to llava-1.5, the difference is that we tell the encoder that we are working with the pure vision model part of CLIP
|
- This is similar to llava-1.5, the difference is that we tell the encoder that we are working with the pure vision model part of CLIP
|
||||||
|
|
||||||
@ -97,7 +99,7 @@ python ./examples/convert_legacy_llama.py ../llava-v1.6-vicuna-7b/ --skip-unknow
|
|||||||
|
|
||||||
7) And finally we can run the llava cli using the 1.6 model version:
|
7) And finally we can run the llava cli using the 1.6 model version:
|
||||||
```console
|
```console
|
||||||
./llama-llava-cli -m ../llava-v1.6-vicuna-7b/ggml-model-f16.gguf --mmproj vit/mmproj-model-f16.gguf --image some-image.jpg -c 4096
|
./llama-mtmd-cli -m ../llava-v1.6-vicuna-7b/ggml-model-f16.gguf --mmproj vit/mmproj-model-f16.gguf
|
||||||
```
|
```
|
||||||
|
|
||||||
**note** llava-1.6 needs more context than llava-1.5, at least 3000 is needed (just run it at -c 4096)
|
**note** llava-1.6 needs more context than llava-1.5, at least 3000 is needed (just run it at -c 4096)
|
||||||
@ -122,17 +124,9 @@ model.language_model.save_pretrained(llm_export_path)
|
|||||||
|
|
||||||
Then, you can convert the LLM using the `convert_hf_to_gguf.py` script, which handles more LLM architectures.
|
Then, you can convert the LLM using the `convert_hf_to_gguf.py` script, which handles more LLM architectures.
|
||||||
|
|
||||||
## llava-cli templating and llava-1.6 prompting
|
## Chat template
|
||||||
|
|
||||||
llava-1.5 models all use the same vicuna prompt, here you can just add your image question like `-p "Provide a full description."`
|
For llava-1.5 and llava-1.6, you need to use the `vicuna` chat template. Simply add `--chat-template vicuna` to activate this template.
|
||||||
For llava-1.5 models which are not vicuna (mistral and Yi) you need to adapt system prompt as well as user prompt, for this purpose llava-cli has a basic templating system:
|
|
||||||
|
|
||||||
**For Mistral and using llava-cli binary:**
|
|
||||||
Add this: `-p "<image>\nUSER:\nProvide a full description.\nASSISTANT:\n"`
|
|
||||||
The mistral template for llava-1.6 seems to be no system print and a USER/ASSISTANT role
|
|
||||||
|
|
||||||
**For the 34B this should work:**
|
|
||||||
Add this: `-e -p <|im_start|>system\nAnswer the questions.<|im_end|><|im_start|>user\n<image>\nProvide a full description.<|im_end|><|im_start|>assistant\n`
|
|
||||||
|
|
||||||
|
|
||||||
## How to know if you are running in llava-1.5 or llava-1.6 mode
|
## How to know if you are running in llava-1.5 or llava-1.6 mode
|
||||||
@ -147,12 +141,3 @@ When running llava-cli you will see a visual information right before the prompt
|
|||||||
|
|
||||||
|
|
||||||
Alternatively just pay notice to how many "tokens" have been used for your prompt, it will also show 1000+ tokens for llava-1.6
|
Alternatively just pay notice to how many "tokens" have been used for your prompt, it will also show 1000+ tokens for llava-1.6
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
## TODO
|
|
||||||
|
|
||||||
- [x] Support non-CPU backend for the image encoding part.
|
|
||||||
- [ ] Support different sampling methods.
|
|
||||||
- [ ] Support more model variants.
|
|
@ -29,8 +29,8 @@ cmake --build build --config Release
|
|||||||
Convert PyTorch model to gguf files (You can also download the converted [gguf](https://huggingface.co/openbmb/MiniCPM-o-2_6-gguf) by us)
|
Convert PyTorch model to gguf files (You can also download the converted [gguf](https://huggingface.co/openbmb/MiniCPM-o-2_6-gguf) by us)
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
python ./examples/llava/minicpmv-surgery.py -m ../MiniCPM-o-2_6
|
python ./tools/mtmd/minicpmv-surgery.py -m ../MiniCPM-o-2_6
|
||||||
python ./examples/llava/minicpmv-convert-image-encoder-to-gguf.py -m ../MiniCPM-o-2_6 --minicpmv-projector ../MiniCPM-o-2_6/minicpmv.projector --output-dir ../MiniCPM-o-2_6/ --image-mean 0.5 0.5 0.5 --image-std 0.5 0.5 0.5 --minicpmv_version 4
|
python ./tools/mtmd/minicpmv-convert-image-encoder-to-gguf.py -m ../MiniCPM-o-2_6 --minicpmv-projector ../MiniCPM-o-2_6/minicpmv.projector --output-dir ../MiniCPM-o-2_6/ --image-mean 0.5 0.5 0.5 --image-std 0.5 0.5 0.5 --minicpmv_version 4
|
||||||
python ./convert_hf_to_gguf.py ../MiniCPM-o-2_6/model
|
python ./convert_hf_to_gguf.py ../MiniCPM-o-2_6/model
|
||||||
|
|
||||||
# quantize int4 version
|
# quantize int4 version
|
||||||
@ -40,9 +40,9 @@ python ./convert_hf_to_gguf.py ../MiniCPM-o-2_6/model
|
|||||||
|
|
||||||
Inference on Linux or Mac
|
Inference on Linux or Mac
|
||||||
```bash
|
```bash
|
||||||
# run f16 version
|
# run in single-turn mode
|
||||||
./build/bin/llama-minicpmv-cli -m ../MiniCPM-o-2_6/model/ggml-model-f16.gguf --mmproj ../MiniCPM-o-2_6/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -p "What is in the image?"
|
./build/bin/llama-mtmd-cli -m ../MiniCPM-o-2_6/model/ggml-model-f16.gguf --mmproj ../MiniCPM-o-2_6/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -p "What is in the image?"
|
||||||
|
|
||||||
# run quantized int4 version
|
# run in conversation mode
|
||||||
./build/bin/llama-minicpmv-cli -m ../MiniCPM-o-2_6/model/ggml-model-Q4_K_M.gguf --mmproj ../MiniCPM-o-2_6/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -p "What is in the image?"
|
./build/bin/llama-mtmd-cli -m ../MiniCPM-o-2_6/model/ggml-model-Q4_K_M.gguf --mmproj ../MiniCPM-o-2_6/mmproj-model-f16.gguf
|
||||||
```
|
```
|
@ -28,8 +28,8 @@ cmake --build build --config Release
|
|||||||
Convert the PyTorch model to gguf files (you can also download the converted [gguf](https://huggingface.co/openbmb/MiniCPM-Llama3-V-2_5-gguf) files from us)
|
Convert the PyTorch model to gguf files (you can also download the converted [gguf](https://huggingface.co/openbmb/MiniCPM-Llama3-V-2_5-gguf) files from us)
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
python ./examples/llava/minicpmv-surgery.py -m ../MiniCPM-Llama3-V-2_5
|
python ./tools/mtmd/minicpmv-surgery.py -m ../MiniCPM-Llama3-V-2_5
|
||||||
python ./examples/llava/minicpmv-convert-image-encoder-to-gguf.py -m ../MiniCPM-Llama3-V-2_5 --minicpmv-projector ../MiniCPM-Llama3-V-2_5/minicpmv.projector --output-dir ../MiniCPM-Llama3-V-2_5/ --image-mean 0.5 0.5 0.5 --image-std 0.5 0.5 0.5 --minicpmv_version 2
|
python ./tools/mtmd/minicpmv-convert-image-encoder-to-gguf.py -m ../MiniCPM-Llama3-V-2_5 --minicpmv-projector ../MiniCPM-Llama3-V-2_5/minicpmv.projector --output-dir ../MiniCPM-Llama3-V-2_5/ --image-mean 0.5 0.5 0.5 --image-std 0.5 0.5 0.5 --minicpmv_version 2
|
||||||
python ./convert_hf_to_gguf.py ../MiniCPM-Llama3-V-2_5/model
|
python ./convert_hf_to_gguf.py ../MiniCPM-Llama3-V-2_5/model
|
||||||
|
|
||||||
# quantize int4 version
|
# quantize int4 version
|
||||||
@ -39,9 +39,9 @@ python ./convert_hf_to_gguf.py ../MiniCPM-Llama3-V-2_5/model
|
|||||||
|
|
||||||
Inference on Linux or Mac
|
Inference on Linux or Mac
|
||||||
```bash
|
```bash
|
||||||
# run f16 version
|
# run in single-turn mode
|
||||||
./build/bin/llama-minicpmv-cli -m ../MiniCPM-Llama3-V-2_5/model/model-8B-F16.gguf --mmproj ../MiniCPM-Llama3-V-2_5/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -p "What is in the image?"
|
./build/bin/llama-mtmd-cli -m ../MiniCPM-Llama3-V-2_5/model/model-8B-F16.gguf --mmproj ../MiniCPM-Llama3-V-2_5/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -p "What is in the image?"
|
||||||
|
|
||||||
# run quantized int4 version
|
# run in conversation mode
|
||||||
./build/bin/llama-minicpmv-cli -m ../MiniCPM-Llama3-V-2_5/model/ggml-model-Q4_K_M.gguf --mmproj ../MiniCPM-Llama3-V-2_5/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -p "What is in the image?"
|
./build/bin/llama-mtmd-cli -m ../MiniCPM-Llama3-V-2_5/model/ggml-model-Q4_K_M.gguf --mmproj ../MiniCPM-Llama3-V-2_5/mmproj-model-f16.gguf
|
||||||
```
|
```
|
@ -28,8 +28,8 @@ cmake --build build --config Release
|
|||||||
Convert the PyTorch model to gguf files (you can also download the converted [gguf](https://huggingface.co/openbmb/MiniCPM-V-2_6-gguf) files from us)
|
Convert the PyTorch model to gguf files (you can also download the converted [gguf](https://huggingface.co/openbmb/MiniCPM-V-2_6-gguf) files from us)
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
python ./examples/llava/minicpmv-surgery.py -m ../MiniCPM-V-2_6
|
python ./tools/mtmd/minicpmv-surgery.py -m ../MiniCPM-V-2_6
|
||||||
python ./examples/llava/minicpmv-convert-image-encoder-to-gguf.py -m ../MiniCPM-V-2_6 --minicpmv-projector ../MiniCPM-V-2_6/minicpmv.projector --output-dir ../MiniCPM-V-2_6/ --image-mean 0.5 0.5 0.5 --image-std 0.5 0.5 0.5 --minicpmv_version 3
|
python ./tools/mtmd/minicpmv-convert-image-encoder-to-gguf.py -m ../MiniCPM-V-2_6 --minicpmv-projector ../MiniCPM-V-2_6/minicpmv.projector --output-dir ../MiniCPM-V-2_6/ --image-mean 0.5 0.5 0.5 --image-std 0.5 0.5 0.5 --minicpmv_version 3
|
||||||
python ./convert_hf_to_gguf.py ../MiniCPM-V-2_6/model
|
python ./convert_hf_to_gguf.py ../MiniCPM-V-2_6/model
|
||||||
|
|
||||||
# quantize int4 version
|
# quantize int4 version
|
||||||
@ -39,9 +39,9 @@ python ./convert_hf_to_gguf.py ../MiniCPM-V-2_6/model
|
|||||||
|
|
||||||
Inference on Linux or Mac
|
Inference on Linux or Mac
|
||||||
```bash
|
```bash
|
||||||
# run f16 version
|
# run in single-turn mode
|
||||||
./build/bin/llama-minicpmv-cli -m ../MiniCPM-V-2_6/model/ggml-model-f16.gguf --mmproj ../MiniCPM-V-2_6/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -p "What is in the image?"
|
./build/bin/llama-mtmd-cli -m ../MiniCPM-V-2_6/model/ggml-model-f16.gguf --mmproj ../MiniCPM-V-2_6/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -p "What is in the image?"
|
||||||
|
|
||||||
# run quantized int4 version
|
# run in conversation mode
|
||||||
./build/bin/llama-minicpmv-cli -m ../MiniCPM-V-2_6/model/ggml-model-Q4_K_M.gguf --mmproj ../MiniCPM-V-2_6/mmproj-model-f16.gguf -c 4096 --temp 0.7 --top-p 0.8 --top-k 100 --repeat-penalty 1.05 --image xx.jpg -p "What is in the image?"
|
./build/bin/llama-mtmd-cli -m ../MiniCPM-V-2_6/model/ggml-model-Q4_K_M.gguf --mmproj ../MiniCPM-V-2_6/mmproj-model-f16.gguf
|
||||||
```
|
```
|
@ -12,60 +12,30 @@ llama_add_compile_flags()
|
|||||||
|
|
||||||
# examples
|
# examples
|
||||||
|
|
||||||
include_directories(${CMAKE_CURRENT_SOURCE_DIR})
|
|
||||||
|
|
||||||
if (EMSCRIPTEN)
|
if (EMSCRIPTEN)
|
||||||
else()
|
else()
|
||||||
add_subdirectory(batched-bench)
|
|
||||||
add_subdirectory(batched)
|
add_subdirectory(batched)
|
||||||
add_subdirectory(embedding)
|
add_subdirectory(embedding)
|
||||||
add_subdirectory(eval-callback)
|
add_subdirectory(eval-callback)
|
||||||
|
|
||||||
if (NOT WIN32)
|
|
||||||
# disabled on Windows because it uses internal functions not exported with LLAMA_API
|
|
||||||
add_subdirectory(gbnf-validator)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
add_subdirectory(gguf-hash)
|
add_subdirectory(gguf-hash)
|
||||||
add_subdirectory(gguf-split)
|
|
||||||
add_subdirectory(gguf)
|
add_subdirectory(gguf)
|
||||||
add_subdirectory(gritlm)
|
add_subdirectory(gritlm)
|
||||||
add_subdirectory(imatrix)
|
|
||||||
add_subdirectory(infill)
|
|
||||||
add_subdirectory(llama-bench)
|
|
||||||
add_subdirectory(lookahead)
|
add_subdirectory(lookahead)
|
||||||
add_subdirectory(lookup)
|
add_subdirectory(lookup)
|
||||||
add_subdirectory(main)
|
|
||||||
add_subdirectory(parallel)
|
add_subdirectory(parallel)
|
||||||
add_subdirectory(passkey)
|
add_subdirectory(passkey)
|
||||||
add_subdirectory(perplexity)
|
|
||||||
add_subdirectory(quantize)
|
|
||||||
add_subdirectory(retrieval)
|
add_subdirectory(retrieval)
|
||||||
if (LLAMA_BUILD_SERVER)
|
|
||||||
add_subdirectory(server)
|
|
||||||
endif()
|
|
||||||
add_subdirectory(save-load-state)
|
add_subdirectory(save-load-state)
|
||||||
add_subdirectory(run)
|
|
||||||
add_subdirectory(simple)
|
add_subdirectory(simple)
|
||||||
add_subdirectory(simple-chat)
|
add_subdirectory(simple-chat)
|
||||||
add_subdirectory(speculative)
|
add_subdirectory(speculative)
|
||||||
add_subdirectory(speculative-simple)
|
add_subdirectory(speculative-simple)
|
||||||
add_subdirectory(tokenize)
|
|
||||||
add_subdirectory(tts)
|
|
||||||
add_subdirectory(gen-docs)
|
add_subdirectory(gen-docs)
|
||||||
|
add_subdirectory(training)
|
||||||
if (NOT GGML_BACKEND_DL)
|
if (NOT GGML_BACKEND_DL)
|
||||||
# these examples use the backends directly and cannot be built with dynamic loading
|
|
||||||
add_subdirectory(convert-llama2c-to-ggml)
|
add_subdirectory(convert-llama2c-to-ggml)
|
||||||
add_subdirectory(cvector-generator)
|
# these examples use the backends directly and cannot be built with dynamic loading
|
||||||
add_subdirectory(export-lora)
|
|
||||||
if (NOT WIN32)
|
|
||||||
# disabled on Windows because it uses internal functions not exported with LLAMA_API
|
|
||||||
add_subdirectory(quantize-stats)
|
|
||||||
endif()
|
|
||||||
add_subdirectory(llava)
|
|
||||||
if (GGML_RPC)
|
|
||||||
add_subdirectory(rpc)
|
|
||||||
endif()
|
|
||||||
if (GGML_SYCL)
|
if (GGML_SYCL)
|
||||||
add_subdirectory(sycl)
|
add_subdirectory(sycl)
|
||||||
endif()
|
endif()
|
||||||
|
@ -116,7 +116,7 @@ if llama_decode(context, batch) != 0 {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for i in 1 ..< n_parallel {
|
for i in 1 ..< n_parallel {
|
||||||
llama_kv_self_seq_cp(context, 0, Int32(i), 0, batch.n_tokens)
|
llama_memory_seq_cp(llama_get_memory(context), 0, Int32(i), 0, batch.n_tokens)
|
||||||
}
|
}
|
||||||
|
|
||||||
if n_parallel > 1 {
|
if n_parallel > 1 {
|
||||||
|
@ -35,23 +35,14 @@ static void batch_add_seq(llama_batch & batch, const std::vector<int32_t> & toke
|
|||||||
|
|
||||||
static void batch_decode(llama_context * ctx, llama_batch & batch, float * output, int n_seq, int n_embd, int embd_norm) {
|
static void batch_decode(llama_context * ctx, llama_batch & batch, float * output, int n_seq, int n_embd, int embd_norm) {
|
||||||
const enum llama_pooling_type pooling_type = llama_pooling_type(ctx);
|
const enum llama_pooling_type pooling_type = llama_pooling_type(ctx);
|
||||||
const struct llama_model * model = llama_get_model(ctx);
|
|
||||||
|
|
||||||
// clear previous kv_cache values (irrelevant for embeddings)
|
// clear previous kv_cache values (irrelevant for embeddings)
|
||||||
llama_kv_self_clear(ctx);
|
llama_memory_clear(llama_get_memory(ctx), true);
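// note: the `true` argument above also clears the memory's data buffers, not just the metadata (based on the llama_memory API)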
|
||||||
|
|
||||||
// run model
|
// run model
|
||||||
LOG_INF("%s: n_tokens = %d, n_seq = %d\n", __func__, batch.n_tokens, n_seq);
|
LOG_INF("%s: n_tokens = %d, n_seq = %d\n", __func__, batch.n_tokens, n_seq);
|
||||||
if (llama_model_has_encoder(model) && !llama_model_has_decoder(model)) {
|
if (llama_decode(ctx, batch) < 0) {
|
||||||
// encoder-only model
|
LOG_ERR("%s : failed to process\n", __func__);
|
||||||
if (llama_encode(ctx, batch) < 0) {
|
|
||||||
LOG_ERR("%s : failed to encode\n", __func__);
|
|
||||||
}
|
|
||||||
} else if (!llama_model_has_encoder(model) && llama_model_has_decoder(model)) {
|
|
||||||
// decoder-only model
|
|
||||||
if (llama_decode(ctx, batch) < 0) {
|
|
||||||
LOG_ERR("%s : failed to decode\n", __func__);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
for (int i = 0; i < batch.n_tokens; i++) {
|
for (int i = 0; i < batch.n_tokens; i++) {
|
||||||
@ -89,6 +80,13 @@ int main(int argc, char ** argv) {
|
|||||||
common_init();
|
common_init();
|
||||||
|
|
||||||
params.embedding = true;
|
params.embedding = true;
|
||||||
|
|
||||||
|
// utilize the full context
|
||||||
|
if (params.n_batch < params.n_ctx) {
|
||||||
|
LOG_WRN("%s: setting batch size to %d\n", __func__, params.n_ctx);
|
||||||
|
params.n_batch = params.n_ctx;
|
||||||
|
}
|
||||||
|
|
||||||
// For non-causal models, batch size must be equal to ubatch size
|
// For non-causal models, batch size must be equal to ubatch size
|
||||||
params.n_ubatch = params.n_batch;
|
params.n_ubatch = params.n_batch;
|
||||||
|
|
||||||
@ -134,7 +132,6 @@ int main(int argc, char ** argv) {
|
|||||||
|
|
||||||
// max batch size
|
// max batch size
|
||||||
const uint64_t n_batch = params.n_batch;
|
const uint64_t n_batch = params.n_batch;
|
||||||
GGML_ASSERT(params.n_batch >= params.n_ctx);
|
|
||||||
|
|
||||||
// tokenize the prompts and trim
|
// tokenize the prompts and trim
|
||||||
std::vector<std::vector<int32_t>> inputs;
|
std::vector<std::vector<int32_t>> inputs;
|
||||||
@ -239,9 +236,24 @@ int main(int argc, char ** argv) {
|
|||||||
LOG("\n");
|
LOG("\n");
|
||||||
}
|
}
|
||||||
} else if (pooling_type == LLAMA_POOLING_TYPE_RANK) {
|
} else if (pooling_type == LLAMA_POOLING_TYPE_RANK) {
|
||||||
|
const uint32_t n_cls_out = llama_model_n_cls_out(model);
|
||||||
|
std::vector<std::string> cls_out_labels;
|
||||||
|
|
||||||
|
for (uint32_t i = 0; i < n_cls_out; i++) {
|
||||||
|
const char * label = llama_model_cls_label(model, i);
|
||||||
|
const std::string label_i(label == nullptr ? "" : label);
|
||||||
|
cls_out_labels.emplace_back(label_i.empty() ? std::to_string(i) : label_i);
|
||||||
|
}
|
||||||
|
|
||||||
for (int j = 0; j < n_embd_count; j++) {
|
for (int j = 0; j < n_embd_count; j++) {
|
||||||
// NOTE: if you change this log - update the tests in ci/run.sh
|
for (uint32_t i = 0; i < n_cls_out; i++) {
|
||||||
LOG("rerank score %d: %8.3f\n", j, emb[j * n_embd]);
|
// NOTE: if you change this log - update the tests in ci/run.sh
|
||||||
|
if (n_cls_out == 1) {
|
||||||
|
LOG("rerank score %d: %8.3f\n", j, emb[j * n_embd]);
|
||||||
|
} else {
|
||||||
|
LOG("rerank score %d: %8.3f [%s]\n", j, emb[j * n_embd + i], cls_out_labels[i].c_str());
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// print the first part of the embeddings or for a single prompt, the full embedding
|
// print the first part of the embeddings or for a single prompt, the full embedding
|
||||||
|
@ -1,5 +0,0 @@
|
|||||||
set(TARGET llama-gbnf-validator)
|
|
||||||
add_executable(${TARGET} gbnf-validator.cpp)
|
|
||||||
install(TARGETS ${TARGET} RUNTIME)
|
|
||||||
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
|
|
||||||
target_compile_features(${TARGET} PRIVATE cxx_std_17)
|
|
@ -41,12 +41,11 @@ static std::vector<std::vector<float>> encode(llama_context * ctx, const std::ve
|
|||||||
|
|
||||||
// add input to batch (this increments n_tokens)
|
// add input to batch (this increments n_tokens)
|
||||||
for (int32_t j = 0; j < n_toks; j++) {
|
for (int32_t j = 0; j < n_toks; j++) {
|
||||||
common_batch_add(batch, inputs[j], j, { 0 }, j >= n_inst);
|
common_batch_add(batch, inputs[j], j, { 0 }, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
// clear previous kv_cache values (irrelevant for embeddings)
|
// clear previous kv_cache values (irrelevant for embeddings)
|
||||||
llama_kv_self_clear(ctx);
|
llama_memory_clear(llama_get_memory(ctx), true);
|
||||||
llama_set_embeddings(ctx, true);
|
|
||||||
llama_set_causal_attn(ctx, false);
|
llama_set_causal_attn(ctx, false);
|
||||||
|
|
||||||
// run model
|
// run model
|
||||||
@ -102,8 +101,7 @@ static std::string generate(llama_context * ctx, llama_sampler * smpl, const std
|
|||||||
|
|
||||||
llama_token eos_token = llama_vocab_eos(vocab);
|
llama_token eos_token = llama_vocab_eos(vocab);
|
||||||
|
|
||||||
llama_kv_self_clear(ctx);
|
llama_memory_clear(llama_get_memory(ctx), true);
|
||||||
llama_set_embeddings(ctx, false);
|
|
||||||
llama_set_causal_attn(ctx, true);
|
llama_set_causal_attn(ctx, true);
|
||||||
|
|
||||||
llama_batch bat = llama_batch_init(llama_n_batch(ctx), 0, 1);
|
llama_batch bat = llama_batch_init(llama_n_batch(ctx), 0, 1);
|
||||||
@ -166,6 +164,8 @@ int main(int argc, char * argv[]) {
|
|||||||
llama_model_params mparams = common_model_params_to_llama(params);
|
llama_model_params mparams = common_model_params_to_llama(params);
|
||||||
llama_context_params cparams = common_context_params_to_llama(params);
|
llama_context_params cparams = common_context_params_to_llama(params);
|
||||||
|
|
||||||
|
cparams.embeddings = true;
|
||||||
|
|
||||||
llama_backend_init();
|
llama_backend_init();
|
||||||
|
|
||||||
llama_model * model = llama_model_load_from_file(params.model.path.c_str(), mparams);
|
llama_model * model = llama_model_load_from_file(params.model.path.c_str(), mparams);
|
||||||
@ -213,6 +213,8 @@ int main(int argc, char * argv[]) {
|
|||||||
std::printf("Cosine similarity between \"%.50s\" and \"%.50s\" is: %.3f\n", queries[1].c_str(), documents[1].c_str(), cosine_sim_q1_d1);
|
std::printf("Cosine similarity between \"%.50s\" and \"%.50s\" is: %.3f\n", queries[1].c_str(), documents[1].c_str(), cosine_sim_q1_d1);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
llama_set_embeddings(ctx, false);
|
||||||
|
|
||||||
// ### Generation ###
|
// ### Generation ###
|
||||||
// GritLM models are not finetuned with system prompts, as you can just include system-like instructions together with your user instruction
|
// GritLM models are not finetuned with system prompts, as you can just include system-like instructions together with your user instruction
|
||||||
{
|
{
|
||||||
|
@ -1,5 +0,0 @@
|
|||||||
set(TARGET llama-infill)
|
|
||||||
add_executable(${TARGET} infill.cpp)
|
|
||||||
install(TARGETS ${TARGET} RUNTIME)
|
|
||||||
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
|
|
||||||
target_compile_features(${TARGET} PRIVATE cxx_std_17)
|
|
@ -1,47 +0,0 @@
|
|||||||
# llama.cpp/example/infill
|
|
||||||
|
|
||||||
This example shows how to use infill mode with Code Llama models that support it.
|
|
||||||
Currently the 7B and 13B models support infill mode.
|
|
||||||
|
|
||||||
Infill supports most of the options available in the main example.
|
|
||||||
|
|
||||||
For further information have a look at the main README.md in llama.cpp/example/main/README.md
|
|
||||||
|
|
||||||
## Common Options
|
|
||||||
|
|
||||||
In this section, we cover the most commonly used options for running the `infill` program with the LLaMA models:
|
|
||||||
|
|
||||||
- `-m FNAME, --model FNAME`: Specify the path to the LLaMA model file (e.g., `models/7B/ggml-model.bin`).
|
|
||||||
- `-i, --interactive`: Run the program in interactive mode, allowing you to provide input directly and receive real-time responses.
|
|
||||||
- `-n N, --n-predict N`: Set the number of tokens to predict when generating text. Adjusting this value can influence the length of the generated text.
|
|
||||||
- `-c N, --ctx-size N`: Set the size of the prompt context. The default is 4096, but if a LLaMA model was built with a longer context, increasing this value will provide better results for longer input/inference.
|
|
||||||
- `--spm-infill`: Use Suffix/Prefix/Middle pattern for infill (instead of Prefix/Suffix/Middle) as some models prefer this.
|
|
||||||
|
|
||||||
## Input Prompts
|
|
||||||
|
|
||||||
The `infill` program provides several ways to interact with the LLaMA models using input prompts:
|
|
||||||
|
|
||||||
- `--in-prefix PROMPT_BEFORE_CURSOR`: Provide the prefix directly as a command-line option.
|
|
||||||
- `--in-suffix PROMPT_AFTER_CURSOR`: Provide the suffix directly as a command-line option.
|
|
||||||
- `--interactive-first`: Run the program in interactive mode and wait for input right away. (More on this below.)
|
|
||||||
|
|
||||||
## Interaction
|
|
||||||
|
|
||||||
The `infill` program offers a seamless way to interact with LLaMA models, allowing users to receive real-time infill suggestions. The interactive mode can be triggered using `--interactive` or `--interactive-first`.
|
|
||||||
|
|
||||||
### Interaction Options
|
|
||||||
|
|
||||||
- `-i, --interactive`: Run the program in interactive mode, allowing users to get real-time code suggestions from the model.
|
|
||||||
- `--interactive-first`: Run the program in interactive mode and immediately wait for user input before starting the text generation.
|
|
||||||
- `--color`: Enable colorized output to visually distinguish between prompts, user input, and generated text.
|
|
||||||
|
|
||||||
### Example
|
|
||||||
|
|
||||||
Download a model that supports infill, for example CodeLlama:
|
|
||||||
```console
|
|
||||||
scripts/hf.sh --repo TheBloke/CodeLlama-13B-GGUF --file codellama-13b.Q5_K_S.gguf --outdir models
|
|
||||||
```
|
|
||||||
|
|
||||||
```bash
|
|
||||||
./llama-infill -t 10 -ngl 0 -m models/codellama-13b.Q5_K_S.gguf -c 4096 --temp 0.7 --repeat_penalty 1.1 -n 20 --in-prefix "def helloworld():\n print(\"hell" --in-suffix "\n print(\"goodbye world\")\n "
|
|
||||||
```
|
|
@ -1,590 +0,0 @@
|
|||||||
#include "arg.h"
|
|
||||||
#include "common.h"
|
|
||||||
#include "console.h"
|
|
||||||
#include "sampling.h"
|
|
||||||
#include "log.h"
|
|
||||||
#include "llama.h"
|
|
||||||
|
|
||||||
#include <cassert>
|
|
||||||
#include <cinttypes>
|
|
||||||
#include <cmath>
|
|
||||||
#include <cstdio>
|
|
||||||
#include <cstring>
|
|
||||||
#include <ctime>
|
|
||||||
#include <fstream>
|
|
||||||
#include <iostream>
|
|
||||||
#include <sstream>
|
|
||||||
#include <string>
|
|
||||||
#include <vector>
|
|
||||||
|
|
||||||
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
|
|
||||||
#include <signal.h>
|
|
||||||
#include <unistd.h>
|
|
||||||
#elif defined (_WIN32)
|
|
||||||
#define WIN32_LEAN_AND_MEAN
|
|
||||||
#ifndef NOMINMAX
|
|
||||||
#define NOMINMAX
|
|
||||||
#endif
|
|
||||||
#include <windows.h>
|
|
||||||
#include <signal.h>
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#if defined(_MSC_VER)
|
|
||||||
#pragma warning(disable: 4244 4267) // possible loss of data
|
|
||||||
#endif
|
|
||||||
|
|
||||||
static llama_context ** g_ctx;
|
|
||||||
static llama_model ** g_model;
|
|
||||||
static common_sampler ** g_smpl;
|
|
||||||
static common_params * g_params;
|
|
||||||
static std::vector<llama_token> * g_input_tokens;
|
|
||||||
static std::ostringstream * g_output_ss;
|
|
||||||
static std::vector<llama_token> * g_output_tokens;
|
|
||||||
|
|
||||||
static bool is_interacting = false;
|
|
||||||
|
|
||||||
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
|
|
||||||
static void sigint_handler(int signo) {
|
|
||||||
if (signo == SIGINT) {
|
|
||||||
if (!is_interacting) {
|
|
||||||
is_interacting = true;
|
|
||||||
} else {
|
|
||||||
console::cleanup();
|
|
||||||
LOG("\n");
|
|
||||||
common_perf_print(*g_ctx, *g_smpl);
|
|
||||||
|
|
||||||
// make sure all logs are flushed
|
|
||||||
LOG("Interrupted by user\n");
|
|
||||||
common_log_pause(common_log_main());
|
|
||||||
|
|
||||||
_exit(130);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
|
|
||||||
int main(int argc, char ** argv) {
|
|
||||||
common_params params;
|
|
||||||
g_params = ¶ms;
|
|
||||||
|
|
||||||
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_INFILL)) {
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
common_init();
|
|
||||||
|
|
||||||
auto & sparams = params.sampling;
|
|
||||||
|
|
||||||
console::init(params.simple_io, params.use_color);
|
|
||||||
atexit([]() { console::cleanup(); });
|
|
||||||
|
|
||||||
if (params.logits_all) {
|
|
||||||
LOG_ERR("\n************\n");
|
|
||||||
LOG_ERR("%s: please use the 'perplexity' tool for perplexity calculations\n", __func__);
|
|
||||||
LOG_ERR("************\n\n");
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (params.embedding) {
|
|
||||||
LOG_ERR("\n************\n");
|
|
||||||
LOG_ERR("%s: please use the 'embedding' tool for embedding calculations\n", __func__);
|
|
||||||
LOG_ERR("************\n\n");
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (params.n_ctx != 0 && params.n_ctx < 8) {
|
|
||||||
LOG_WRN("%s: minimum context size is 8, using minimum size.\n", __func__);
|
|
||||||
params.n_ctx = 8;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!params.interactive_first && (params.input_prefix.empty() && params.input_suffix.empty())) {
|
|
||||||
LOG_ERR("\n************\n");
|
|
||||||
LOG_ERR("%s: please use '--interactive_first' or specify '--in_prefix' and/or '--in_suffix'\n", __func__);
|
|
||||||
LOG_ERR("************\n\n");
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (params.rope_freq_base != 0.0) {
|
|
||||||
LOG_WRN("%s: changing RoPE frequency base to %g.\n", __func__, params.rope_freq_base);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (params.rope_freq_scale != 0.0) {
|
|
||||||
LOG_WRN("%s: scaling RoPE frequency by %g.\n", __func__, params.rope_freq_scale);
|
|
||||||
}
|
|
||||||
|
|
||||||
LOG_INF("%s: llama backend init\n", __func__);
|
|
||||||
llama_backend_init();
|
|
||||||
llama_numa_init(params.numa);
|
|
||||||
|
|
||||||
llama_model * model = nullptr;
|
|
||||||
llama_context * ctx = nullptr;
|
|
||||||
common_sampler * smpl = nullptr;
|
|
||||||
|
|
||||||
g_model = &model;
|
|
||||||
g_ctx = &ctx;
|
|
||||||
g_smpl = &smpl;
|
|
||||||
|
|
||||||
// load the model and apply lora adapter, if any
|
|
||||||
LOG_INF("%s: load the model and apply lora adapter, if any\n", __func__);
|
|
||||||
common_init_result llama_init = common_init_from_params(params);
|
|
||||||
|
|
||||||
model = llama_init.model.get();
|
|
||||||
ctx = llama_init.context.get();
|
|
||||||
|
|
||||||
if (model == NULL) {
|
|
||||||
LOG_ERR("%s: unable to load model\n", __func__);
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
const llama_vocab * vocab = llama_model_get_vocab(model);
|
|
||||||
|
|
||||||
const int n_ctx_train = llama_model_n_ctx_train(model);
|
|
||||||
const int n_ctx = llama_n_ctx(ctx);
|
|
||||||
LOG_DBG("n_ctx: %d\n", n_ctx);
|
|
||||||
|
|
||||||
if (n_ctx > n_ctx_train) {
|
|
||||||
LOG_WRN("%s: model was trained on only %d context tokens (%d specified)\n", __func__, n_ctx_train, n_ctx);
|
|
||||||
}
|
|
||||||
|
|
||||||
// print system information
|
|
||||||
{
|
|
||||||
LOG_INF("\n");
|
|
||||||
LOG_INF("%s\n", common_params_get_system_info(params).c_str());
|
|
||||||
}
|
|
||||||
const bool add_bos = llama_vocab_get_add_bos(vocab);
|
|
||||||
GGML_ASSERT(!llama_vocab_get_add_eos(vocab));
|
|
||||||
|
|
||||||
std::vector<llama_token> embd_inp;
|
|
||||||
std::vector<llama_token> embd_end;
|
|
||||||
std::vector<llama_token> inp_pfx = common_tokenize(ctx, params.input_prefix, false);
|
|
||||||
std::vector<llama_token> inp_sfx = common_tokenize(ctx, params.input_suffix, false);
|
|
||||||
|
|
||||||
GGML_ASSERT(llama_vocab_fim_pre(vocab) >= 0);
|
|
||||||
GGML_ASSERT(llama_vocab_fim_suf(vocab) >= 0);
|
|
||||||
|
|
||||||
inp_pfx.insert(inp_pfx.begin(), llama_vocab_fim_pre(vocab));
|
|
||||||
inp_sfx.insert(inp_sfx.begin(), llama_vocab_fim_suf(vocab));
|
|
||||||
|
|
||||||
embd_inp = params.spm_infill ? inp_sfx : inp_pfx;
|
|
||||||
embd_end = params.spm_infill ? inp_pfx : inp_sfx;
|
|
||||||
if (add_bos) {
|
|
||||||
embd_inp.insert(embd_inp.begin(), llama_vocab_bos(vocab));
|
|
||||||
}
|
|
||||||
embd_inp.insert(embd_inp.end(), embd_end.begin(), embd_end.end());
|
|
||||||
|
|
||||||
const llama_token middle_token = llama_vocab_fim_mid(vocab);
|
|
||||||
if (middle_token >= 0) {
|
|
||||||
embd_inp.push_back(middle_token);
|
|
||||||
}
|
|
||||||
|
|
||||||
LOG_DBG("add_bos: %d\n", add_bos);
|
|
||||||
LOG_DBG("prefix: \"%s\"\n", params.input_prefix.c_str());
|
|
||||||
LOG_DBG("suffix: \"%s\"\n", params.input_suffix.c_str());
|
|
||||||
LOG_DBG("tokens: %s\n", string_from(ctx, embd_inp).c_str());
|
|
||||||
|
|
||||||
// Should not run without any tokens
|
|
||||||
if (embd_inp.empty()) {
|
|
||||||
embd_inp.push_back(llama_vocab_bos(vocab));
|
|
||||||
LOG_WRN("embd_inp was considered empty and bos was added: %s\n", string_from(ctx, embd_inp).c_str());
|
|
||||||
}
|
|
||||||
|
|
||||||
if ((int) embd_inp.size() > n_ctx - 4) {
|
|
||||||
LOG_ERR("%s: prompt is too long (%d tokens, max %d)\n", __func__, (int) embd_inp.size(), n_ctx - 4);
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
// number of tokens to keep when resetting context
|
|
||||||
if (params.n_keep < 0 || params.n_keep > (int) embd_inp.size()) {
|
|
||||||
params.n_keep = (int)embd_inp.size();
|
|
||||||
}
|
|
||||||
|
|
||||||
LOG_INF("inp_pfx: %s\n", string_from(ctx, inp_pfx).c_str());
|
|
||||||
LOG_INF("inp_sfx: %s\n", string_from(ctx, inp_sfx).c_str());
|
|
||||||
|
|
||||||
// enable interactive mode if interactive start is specified
|
|
||||||
if (params.interactive_first) {
|
|
||||||
params.interactive = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (params.verbose_prompt) {
|
|
||||||
LOG_INF("\n");
|
|
||||||
LOG_INF("%s: prompt: '%s'\n", __func__, params.prompt.c_str());
|
|
||||||
LOG_INF("%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
|
|
||||||
for (int i = 0; i < (int) embd_inp.size(); i++) {
|
|
||||||
LOG_INF("%6d -> '%s'\n", embd_inp[i], common_token_to_piece(ctx, embd_inp[i]).c_str());
|
|
||||||
}
|
|
||||||
|
|
||||||
if (params.n_keep > 0) {
|
|
||||||
LOG_INF("%s: static prompt based on n_keep: '", __func__);
|
|
||||||
for (int i = 0; i < params.n_keep; i++) {
|
|
||||||
LOG_CNT("%s", common_token_to_piece(ctx, embd_inp[i]).c_str());
|
|
||||||
}
|
|
||||||
LOG_CNT("'\n");
|
|
||||||
}
|
|
||||||
LOG_INF("\n");
|
|
||||||
}
|
|
||||||
|
|
||||||
if (params.interactive) {
|
|
||||||
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
|
|
||||||
struct sigaction sigint_action;
|
|
||||||
sigint_action.sa_handler = sigint_handler;
|
|
||||||
sigemptyset (&sigint_action.sa_mask);
|
|
||||||
sigint_action.sa_flags = 0;
|
|
||||||
sigaction(SIGINT, &sigint_action, NULL);
|
|
||||||
#elif defined (_WIN32)
|
|
||||||
auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
|
|
||||||
return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false;
|
|
||||||
};
|
|
||||||
SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
|
|
||||||
#endif
|
|
||||||
|
|
||||||
LOG_INF("%s: interactive mode on.\n", __func__);
|
|
||||||
|
|
||||||
if (params.input_prefix_bos) {
|
|
||||||
LOG_INF("Input prefix with BOS\n");
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!params.input_prefix.empty()) {
|
|
||||||
LOG_INF("Input prefix: '%s'\n", params.input_prefix.c_str());
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!params.input_suffix.empty()) {
|
|
||||||
LOG_INF("Input suffix: '%s'\n", params.input_suffix.c_str());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
smpl = common_sampler_init(model, sparams);
|
|
||||||
|
|
||||||
LOG_INF("sampler seed: %u\n", common_sampler_get_seed(smpl));
|
|
||||||
LOG_INF("sampler params: \n%s\n", sparams.print().c_str());
|
|
||||||
LOG_INF("sampler chain: %s\n", common_sampler_print(smpl).c_str());
|
|
||||||
|
|
||||||
LOG_INF("generate: n_ctx = %d, n_batch = %d, n_predict = %d, n_keep = %d\n", n_ctx, params.n_batch, params.n_predict, params.n_keep);
|
|
||||||
|
|
||||||
LOG_INF("\n");
|
|
||||||
LOG_INF("\n##### Infill mode #####\n\n");
|
|
||||||
if (params.interactive) {
|
|
||||||
const char *control_message;
|
|
||||||
if (params.multiline_input) {
|
|
||||||
control_message = " - To return control to LLaMA, end your input with '\\'.\n"
|
|
||||||
" - To return control without starting a new line, end your input with '/'.\n";
|
|
||||||
} else {
|
|
||||||
control_message = " - Press Return to return control to LLaMA.\n"
|
|
||||||
" - To return control without starting a new line, end your input with '/'.\n"
|
|
||||||
" - If you want to submit another line, end your input with '\\'.\n";
|
|
||||||
}
|
|
||||||
LOG_INF("== Running in interactive mode. ==\n");
|
|
||||||
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
|
|
||||||
LOG_INF( " - Press Ctrl+C to interject at any time.\n");
|
|
||||||
#endif
|
|
||||||
LOG_INF( "%s\n", control_message);
|
|
||||||
|
|
||||||
is_interacting = params.interactive_first;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool input_echo = true;
|
|
||||||
|
|
||||||
int n_past = 0;
|
|
||||||
int n_remain = params.n_predict;
|
|
||||||
int n_consumed = 0;
|
|
||||||
|
|
||||||
std::vector<int> input_tokens; g_input_tokens = &input_tokens;
|
|
||||||
std::vector<int> output_tokens; g_output_tokens = &output_tokens;
|
|
||||||
std::ostringstream output_ss; g_output_ss = &output_ss;
|
|
||||||
|
|
||||||
// the first thing we will do is to output the prompt, so set color accordingly
|
|
||||||
console::set_display(console::prompt);
|
|
||||||
|
|
||||||
std::vector<llama_token> embd;
|
|
||||||
|
|
||||||
while (n_remain != 0 || params.interactive) {
|
|
||||||
// predict
|
|
||||||
if (!embd.empty()) {
|
|
||||||
// Note: n_ctx - 4 here is to match the logic for commandline prompt handling via
|
|
||||||
// --prompt or --file which uses the same value.
|
|
||||||
int max_embd_size = n_ctx - 4;
|
|
||||||
|
|
||||||
// Ensure the input doesn't exceed the context size by truncating embd if necessary.
|
|
||||||
if ((int) embd.size() > max_embd_size) {
|
|
||||||
const int skipped_tokens = (int) embd.size() - max_embd_size;
|
|
||||||
embd.resize(max_embd_size);
|
|
||||||
|
|
||||||
console::set_display(console::error);
|
|
||||||
LOG_WRN("<<input too long: skipped %d token%s>>", skipped_tokens, skipped_tokens != 1 ? "s" : "");
|
|
||||||
console::set_display(console::reset);
|
|
||||||
}
|
|
||||||
|
|
||||||
// infinite text generation via context swapping
|
|
||||||
// if we run out of context:
|
|
||||||
// - take the n_keep first tokens from the original prompt (via n_past)
|
|
||||||
// - take half of the last (n_ctx - n_keep) tokens and recompute the logits in batches
|
|
||||||
if (n_past + (int) embd.size() > n_ctx) {
|
|
||||||
if (params.n_predict == -2) {
|
|
||||||
LOG_DBG("\n\n%s: context full and n_predict == -%d => stopping\n", __func__, params.n_predict);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
const int n_left = n_past - params.n_keep - 1;
|
|
||||||
const int n_discard = n_left/2;
|
|
||||||
|
|
||||||
LOG_DBG("context full, swapping: n_past = %d, n_left = %d, n_ctx = %d, n_keep = %d, n_discard = %d\n",
|
|
||||||
n_past, n_left, n_ctx, params.n_keep, n_discard);
|
|
||||||
|
|
||||||
llama_kv_self_seq_rm (ctx, 0, params.n_keep + 1 , params.n_keep + n_discard + 1);
|
|
||||||
llama_kv_self_seq_add(ctx, 0, params.n_keep + 1 + n_discard, n_past, -n_discard);
|
|
||||||
|
|
||||||
n_past -= n_discard;
|
|
||||||
|
|
||||||
LOG_DBG("after swap: n_past = %d\n", n_past);
|
|
||||||
|
|
||||||
LOG_DBG("embd: %s\n", string_from(ctx, embd).c_str());
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// evaluate tokens in batches
|
|
||||||
// embd is typically prepared beforehand to fit within a batch, but not always
|
|
||||||
for (int i = 0; i < (int) embd.size(); i += params.n_batch) {
|
|
||||||
int n_eval = (int) embd.size() - i;
|
|
||||||
if (n_eval > params.n_batch) {
|
|
||||||
n_eval = params.n_batch;
|
|
||||||
}
|
|
||||||
|
|
||||||
LOG_DBG("eval: %s\n", string_from(ctx, embd).c_str());
|
|
||||||
|
|
||||||
if (llama_decode(ctx, llama_batch_get_one(&embd[i], n_eval))) {
|
|
||||||
LOG_ERR("%s : failed to eval\n", __func__);
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
n_past += n_eval;
|
|
||||||
|
|
||||||
LOG_DBG("n_past = %d\n", n_past);
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
embd.clear();
|
|
||||||
|
|
||||||
if ((int) embd_inp.size() <= n_consumed && !is_interacting) {
|
|
||||||
const llama_token id = common_sampler_sample(smpl, ctx, -1);
|
|
||||||
|
|
||||||
common_sampler_accept(smpl, id, true);
|
|
||||||
|
|
||||||
// LOG_DBG("last: %s\n", string_from(ctx, smpl->prev.to_vector()).c_str());
|
|
||||||
|
|
||||||
embd.push_back(id);
|
|
||||||
|
|
||||||
// echo this to console
|
|
||||||
input_echo = true;
|
|
||||||
|
|
||||||
// decrement remaining sampling budget
|
|
||||||
--n_remain;
|
|
||||||
|
|
||||||
LOG_DBG("n_remain: %d\n", n_remain);
|
|
||||||
} else {
|
|
||||||
// some user input remains from prompt or interaction, forward it to processing
|
|
||||||
LOG_DBG("embd_inp.size(): %d, n_consumed: %d\n", (int) embd_inp.size(), n_consumed);
|
|
||||||
while ((int) embd_inp.size() > n_consumed) {
|
|
||||||
embd.push_back(embd_inp[n_consumed]);
|
|
||||||
|
|
||||||
// push the prompt in the sampling context in order to apply repetition penalties later
|
|
||||||
// for the prompt, we don't apply grammar rules
|
|
||||||
common_sampler_accept(smpl, embd_inp[n_consumed], false);
|
|
||||||
|
|
||||||
++n_consumed;
|
|
||||||
if ((int) embd.size() >= params.n_batch) {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// display text
|
|
||||||
if (input_echo) {
|
|
||||||
for (auto id : embd) {
|
|
||||||
const std::string token_str = common_token_to_piece(ctx, id);
|
|
||||||
LOG("%s", token_str.c_str());
|
|
||||||
|
|
||||||
if (embd.size() > 1) {
|
|
||||||
input_tokens.push_back(id);
|
|
||||||
} else {
|
|
||||||
output_tokens.push_back(id);
|
|
||||||
output_ss << token_str;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// reset color to default if there is no pending user input
|
|
||||||
if (input_echo && (int) embd_inp.size() == n_consumed) {
|
|
||||||
console::set_display(console::reset);
|
|
||||||
}
|
|
||||||
|
|
||||||
// if not currently processing queued inputs;
|
|
||||||
if ((int) embd_inp.size() <= n_consumed) {
|
|
||||||
// deal with eot token in infill mode
|
|
||||||
if ((common_sampler_last(smpl) == llama_vocab_eot(vocab) || is_interacting) && params.interactive){
|
|
||||||
if (is_interacting && !params.interactive_first) {
|
|
||||||
// print an eot token
|
|
||||||
LOG("%s", common_token_to_piece(ctx, llama_vocab_eot(vocab)).c_str());
|
|
||||||
}
|
|
||||||
LOG("\n");
|
|
||||||
console::set_display(console::user_input);
|
|
||||||
std::string buffer;
|
|
||||||
std::string line;
|
|
||||||
bool another_line=true;
|
|
||||||
// set a new prefix via stdin
|
|
||||||
do {
|
|
||||||
another_line = console::readline(line, params.multiline_input);
|
|
||||||
buffer += line;
|
|
||||||
} while (another_line);
|
|
||||||
// check if we got an empty line, if so we use the old input
|
|
||||||
if (!buffer.empty() && !(buffer.length() == 1 && buffer[0] == '\n')) {
|
|
||||||
params.input_prefix = buffer;
|
|
||||||
}
|
|
||||||
buffer.clear();
|
|
||||||
// set a new suffix via stdin
|
|
||||||
do {
|
|
||||||
another_line = console::readline(line, params.multiline_input);
|
|
||||||
buffer += line;
|
|
||||||
} while (another_line);
|
|
||||||
// check if we got an empty line
|
|
||||||
if (!buffer.empty() && !(buffer.length() == 1 && buffer[0] == '\n')) {
|
|
||||||
params.input_suffix = buffer;
|
|
||||||
}
|
|
||||||
buffer.clear();
|
|
||||||
// done taking input, reset color
|
|
||||||
console::set_display(console::reset);
|
|
||||||
|
|
||||||
if (params.escape) {
|
|
||||||
//process escape sequences, for the initial prompt this is done in common.cpp when we load the params, but for the interactive mode we need to do it here
|
|
||||||
string_process_escapes(params.input_prefix);
|
|
||||||
string_process_escapes(params.input_suffix);
|
|
||||||
}
|
|
||||||
|
|
||||||
// tokenize new prefix and suffix
|
|
||||||
std::vector<llama_token> inp_pfx = common_tokenize(ctx, params.input_prefix, false);
|
|
||||||
std::vector<llama_token> inp_sfx = common_tokenize(ctx, params.input_suffix, false);
|
|
||||||
|
|
||||||
inp_pfx.insert(inp_pfx.begin(), llama_vocab_fim_pre(vocab));
|
|
||||||
inp_sfx.insert(inp_sfx.begin(), llama_vocab_fim_suf(vocab));
|
|
||||||
|
|
||||||
embd_inp = params.spm_infill ? inp_sfx : inp_pfx;
|
|
||||||
embd_end = params.spm_infill ? inp_pfx : inp_sfx;
|
|
||||||
if (add_bos) {
|
|
||||||
embd_inp.insert(embd_inp.begin(), llama_vocab_bos(vocab));
|
|
||||||
}
|
|
||||||
embd_inp.insert(embd_inp.end(), embd_end.begin(), embd_end.end());
|
|
||||||
|
|
||||||
if (middle_token >= 0) {
|
|
||||||
embd_inp.push_back(middle_token);
|
|
||||||
}
|
|
||||||
|
|
||||||
embd.clear();
|
|
||||||
n_remain = params.n_predict;
|
|
||||||
n_past = 0;
|
|
||||||
n_consumed = 0;
|
|
||||||
is_interacting = false;
|
|
||||||
}
|
|
||||||
// deal with end of generation tokens in interactive mode
|
|
||||||
else if (llama_vocab_is_eog(vocab, common_sampler_last(smpl))) {
|
|
||||||
LOG_DBG("found EOS token\n");
|
|
||||||
|
|
||||||
if (params.interactive) {
|
|
||||||
|
|
||||||
is_interacting = true;
|
|
||||||
LOG("\n");
|
|
||||||
console::set_display(console::user_input);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (n_past > 0 && is_interacting && !params.interactive) {
|
|
||||||
LOG_DBG("waiting for user input\n");
|
|
||||||
|
|
||||||
if (params.input_prefix_bos) {
|
|
||||||
LOG_DBG("adding input prefix BOS token\n");
|
|
||||||
embd_inp.push_back(llama_vocab_bos(vocab));
|
|
||||||
}
|
|
||||||
|
|
||||||
std::string buffer;
|
|
||||||
if (!params.input_prefix.empty()) {
|
|
||||||
LOG_DBG("appending input prefix: '%s'\n", params.input_prefix.c_str());
|
|
||||||
buffer += params.input_prefix;
|
|
||||||
LOG("%s", buffer.c_str());
|
|
||||||
}
|
|
||||||
|
|
||||||
std::string line;
|
|
||||||
bool another_line = true;
|
|
||||||
do {
|
|
||||||
another_line = console::readline(line, params.multiline_input);
|
|
||||||
buffer += line;
|
|
||||||
} while (another_line);
|
|
||||||
|
|
||||||
// done taking input, reset color
|
|
||||||
console::set_display(console::reset);
|
|
||||||
|
|
||||||
// Add tokens to embd only if the input buffer is non-empty
|
|
||||||
// Entering an empty line lets the user pass control back
|
|
||||||
if (buffer.length() > 1) {
|
|
||||||
// append input suffix if any
|
|
||||||
if (!params.input_suffix.empty()) {
|
|
||||||
LOG_DBG("appending input suffix: '%s'\n", params.input_suffix.c_str());
|
|
||||||
buffer += params.input_suffix;
|
|
||||||
LOG("%s", params.input_suffix.c_str());
|
|
||||||
}
|
|
||||||
|
|
||||||
LOG_DBG("buffer: '%s'\n", buffer.c_str());
|
|
||||||
|
|
||||||
const size_t original_size = embd_inp.size();
|
|
||||||
|
|
||||||
const auto line_inp = common_tokenize(ctx, buffer, false);
|
|
||||||
LOG_DBG("input tokens: %s\n", string_from(ctx, line_inp).c_str());
|
|
||||||
|
|
||||||
embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end());
|
|
||||||
|
|
||||||
for (size_t i = original_size; i < embd_inp.size(); ++i) {
|
|
||||||
const llama_token token = embd_inp[i];
|
|
||||||
output_tokens.push_back(token);
|
|
||||||
output_ss << common_token_to_piece(ctx, token);
|
|
||||||
}
|
|
||||||
|
|
||||||
n_remain -= line_inp.size();
|
|
||||||
LOG_DBG("n_remain: %d\n", n_remain);
|
|
||||||
} else {
|
|
||||||
LOG_DBG("empty line, passing control back\n");
|
|
||||||
}
|
|
||||||
|
|
||||||
input_echo = false; // do not echo this again
|
|
||||||
}
|
|
||||||
|
|
||||||
if (n_past > 0) {
|
|
||||||
if (is_interacting) {
|
|
||||||
common_sampler_reset(smpl);
|
|
||||||
}
|
|
||||||
is_interacting = false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// end of generation
|
|
||||||
if (!embd.empty() && llama_vocab_is_eog(vocab, embd.back()) && !params.interactive) {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
// In interactive mode, respect the maximum number of tokens and drop back to user input when reached.
|
|
||||||
// We skip this logic when n_predict == -1 (infinite) or -2 (stop at context size).
|
|
||||||
if (params.interactive && n_remain <= 0 && params.n_predict >= 0) {
|
|
||||||
n_remain = params.n_predict;
|
|
||||||
is_interacting = true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (!params.interactive && n_remain <= 0) {
|
|
||||||
LOG("%s", common_token_to_piece(ctx, llama_vocab_eot(vocab)).c_str());
|
|
||||||
}
|
|
||||||
|
|
||||||
LOG("\n");
|
|
||||||
common_perf_print(ctx, smpl);
|
|
||||||
|
|
||||||
common_sampler_free(smpl);
|
|
||||||
llama_backend_free();
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
@ -10,6 +10,9 @@ from typing import Any, List, Optional, Set, Tuple, Union
|
|||||||
|
|
||||||
def _build_repetition(item_rule, min_items, max_items, separator_rule=None):
|
def _build_repetition(item_rule, min_items, max_items, separator_rule=None):
|
||||||
|
|
||||||
|
if max_items == 0:
|
||||||
|
return ""
|
||||||
|
|
||||||
if min_items == 0 and max_items == 1:
|
if min_items == 0 and max_items == 1:
|
||||||
return f'{item_rule}?'
|
return f'{item_rule}?'
|
||||||
|
|
||||||
|
@ -194,7 +194,7 @@ Java_android_llama_cpp_LLamaAndroid_bench_1model(
|
|||||||
}
|
}
|
||||||
|
|
||||||
batch->logits[batch->n_tokens - 1] = true;
|
batch->logits[batch->n_tokens - 1] = true;
|
||||||
llama_kv_self_clear(context);
|
llama_memory_clear(llama_get_memory(context), false);
|
||||||
|
|
||||||
const auto t_pp_start = ggml_time_us();
|
const auto t_pp_start = ggml_time_us();
|
||||||
if (llama_decode(context, *batch) != 0) {
|
if (llama_decode(context, *batch) != 0) {
|
||||||
@ -206,7 +206,7 @@ Java_android_llama_cpp_LLamaAndroid_bench_1model(
|
|||||||
|
|
||||||
LOGi("Benchmark text generation (tg)");
|
LOGi("Benchmark text generation (tg)");
|
||||||
|
|
||||||
llama_kv_self_clear(context);
|
llama_memory_clear(llama_get_memory(context), false);
|
||||||
const auto t_tg_start = ggml_time_us();
|
const auto t_tg_start = ggml_time_us();
|
||||||
for (i = 0; i < tg; i++) {
|
for (i = 0; i < tg; i++) {
|
||||||
|
|
||||||
@ -223,7 +223,7 @@ Java_android_llama_cpp_LLamaAndroid_bench_1model(
|
|||||||
|
|
||||||
const auto t_tg_end = ggml_time_us();
|
const auto t_tg_end = ggml_time_us();
|
||||||
|
|
||||||
llama_kv_self_clear(context);
|
llama_memory_clear(llama_get_memory(context), false);
|
||||||
|
|
||||||
const auto t_pp = double(t_pp_end - t_pp_start) / 1000000.0;
|
const auto t_pp = double(t_pp_end - t_pp_start) / 1000000.0;
|
||||||
const auto t_tg = double(t_tg_end - t_tg_start) / 1000000.0;
|
const auto t_tg = double(t_tg_end - t_tg_start) / 1000000.0;
|
||||||
@ -448,5 +448,5 @@ Java_android_llama_cpp_LLamaAndroid_completion_1loop(
|
|||||||
extern "C"
|
extern "C"
|
||||||
JNIEXPORT void JNICALL
|
JNIEXPORT void JNICALL
|
||||||
Java_android_llama_cpp_LLamaAndroid_kv_1cache_1clear(JNIEnv *, jobject, jlong context) {
|
Java_android_llama_cpp_LLamaAndroid_kv_1cache_1clear(JNIEnv *, jobject, jlong context) {
|
||||||
llama_kv_self_clear(reinterpret_cast<llama_context *>(context));
|
llama_memory_clear(llama_get_memory(reinterpret_cast<llama_context *>(context)), true);
|
||||||
}
|
}
|
||||||
|
@ -210,7 +210,7 @@ actor LlamaContext {
|
|||||||
}
|
}
|
||||||
batch.logits[Int(batch.n_tokens) - 1] = 1 // true
|
batch.logits[Int(batch.n_tokens) - 1] = 1 // true
|
||||||
|
|
||||||
llama_kv_self_clear(context)
|
llama_memory_clear(llama_get_memory(context), false)
|
||||||
|
|
||||||
let t_pp_start = DispatchTime.now().uptimeNanoseconds / 1000;
|
let t_pp_start = DispatchTime.now().uptimeNanoseconds / 1000;
|
||||||
|
|
||||||
@ -223,7 +223,7 @@ actor LlamaContext {
|
|||||||
|
|
||||||
// bench text generation
|
// bench text generation
|
||||||
|
|
||||||
llama_kv_self_clear(context)
|
llama_memory_clear(llama_get_memory(context), false)
|
||||||
|
|
||||||
let t_tg_start = DispatchTime.now().uptimeNanoseconds / 1000;
|
let t_tg_start = DispatchTime.now().uptimeNanoseconds / 1000;
|
||||||
|
|
||||||
@ -242,7 +242,7 @@ actor LlamaContext {
|
|||||||
|
|
||||||
let t_tg_end = DispatchTime.now().uptimeNanoseconds / 1000;
|
let t_tg_end = DispatchTime.now().uptimeNanoseconds / 1000;
|
||||||
|
|
||||||
llama_kv_self_clear(context)
|
llama_memory_clear(llama_get_memory(context), false)
|
||||||
|
|
||||||
let t_pp = Double(t_pp_end - t_pp_start) / 1000000.0
|
let t_pp = Double(t_pp_end - t_pp_start) / 1000000.0
|
||||||
let t_tg = Double(t_tg_end - t_tg_start) / 1000000.0
|
let t_tg = Double(t_tg_end - t_tg_start) / 1000000.0
|
||||||
@ -292,7 +292,7 @@ actor LlamaContext {
|
|||||||
func clear() {
|
func clear() {
|
||||||
tokens_list.removeAll()
|
tokens_list.removeAll()
|
||||||
temporary_invalid_cchars.removeAll()
|
temporary_invalid_cchars.removeAll()
|
||||||
llama_kv_self_clear(context)
|
llama_memory_clear(llama_get_memory(context), true)
|
||||||
}
|
}
|
||||||
|
|
||||||
private func tokenize(text: String, add_bos: Bool) -> [llama_token] {
|
private func tokenize(text: String, add_bos: Bool) -> [llama_token] {
|
||||||
|
@ -1,97 +0,0 @@
|
|||||||
# llava (legacy)
|
|
||||||
|
|
||||||
add_library(llava OBJECT
|
|
||||||
llava.cpp
|
|
||||||
llava.h
|
|
||||||
clip.cpp
|
|
||||||
clip.h
|
|
||||||
)
|
|
||||||
|
|
||||||
target_link_libraries(llava PRIVATE ggml llama ${CMAKE_THREAD_LIBS_INIT})
|
|
||||||
|
|
||||||
target_include_directories(llava PUBLIC .)
|
|
||||||
target_include_directories(llava PUBLIC ../..)
|
|
||||||
target_include_directories(llava PUBLIC ../../common)
|
|
||||||
|
|
||||||
target_compile_features(llava PRIVATE cxx_std_17)
|
|
||||||
|
|
||||||
add_library(llava_static STATIC $<TARGET_OBJECTS:llava>)
|
|
||||||
if (BUILD_SHARED_LIBS)
|
|
||||||
set_target_properties(llava PROPERTIES POSITION_INDEPENDENT_CODE ON)
|
|
||||||
target_compile_definitions(llava PRIVATE LLAMA_SHARED LLAMA_BUILD)
|
|
||||||
add_library(llava_shared SHARED $<TARGET_OBJECTS:llava>)
|
|
||||||
target_link_libraries(llava_shared PRIVATE ggml llama ${CMAKE_THREAD_LIBS_INIT})
|
|
||||||
install(TARGETS llava_shared LIBRARY)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
# mtmd
|
|
||||||
|
|
||||||
add_library(mtmd OBJECT
|
|
||||||
mtmd.cpp
|
|
||||||
mtmd.h
|
|
||||||
clip.cpp
|
|
||||||
clip.h
|
|
||||||
clip-impl.h
|
|
||||||
)
|
|
||||||
|
|
||||||
target_link_libraries(mtmd PRIVATE ggml llama ${CMAKE_THREAD_LIBS_INIT})
|
|
||||||
|
|
||||||
target_include_directories(mtmd PUBLIC .)
|
|
||||||
target_include_directories(mtmd PRIVATE ../..)
|
|
||||||
target_include_directories(mtmd PRIVATE ../../common) # for stb_image.h
|
|
||||||
|
|
||||||
target_compile_features(mtmd PRIVATE cxx_std_17)
|
|
||||||
|
|
||||||
add_library(mtmd_static STATIC $<TARGET_OBJECTS:mtmd>)
|
|
||||||
if (BUILD_SHARED_LIBS)
|
|
||||||
set_target_properties(mtmd PROPERTIES POSITION_INDEPENDENT_CODE ON)
|
|
||||||
target_compile_definitions(mtmd PRIVATE LLAMA_SHARED LLAMA_BUILD)
|
|
||||||
add_library(mtmd_shared SHARED $<TARGET_OBJECTS:mtmd>)
|
|
||||||
target_link_libraries(mtmd_shared PRIVATE ggml llama ${CMAKE_THREAD_LIBS_INIT})
|
|
||||||
install(TARGETS mtmd_shared LIBRARY)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
if (NOT MSVC)
|
|
||||||
target_compile_options(llava PRIVATE -Wno-cast-qual) # stb_image.h
|
|
||||||
target_compile_options(mtmd PRIVATE -Wno-cast-qual) # stb_image.h
|
|
||||||
endif()
|
|
||||||
|
|
||||||
if(TARGET BUILD_INFO)
|
|
||||||
add_dependencies(llava BUILD_INFO)
|
|
||||||
add_dependencies(mtmd BUILD_INFO)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
set(TARGET llama-llava-cli)
|
|
||||||
add_executable(${TARGET} llava-cli.cpp)
|
|
||||||
set_target_properties(${TARGET} PROPERTIES OUTPUT_NAME llama-llava-cli)
|
|
||||||
install(TARGETS ${TARGET} RUNTIME)
|
|
||||||
target_link_libraries(${TARGET} PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT})
|
|
||||||
target_compile_features(${TARGET} PRIVATE cxx_std_17)
|
|
||||||
|
|
||||||
set(TARGET llama-minicpmv-cli)
|
|
||||||
add_executable(${TARGET} minicpmv-cli.cpp)
|
|
||||||
set_target_properties(${TARGET} PROPERTIES OUTPUT_NAME llama-minicpmv-cli)
|
|
||||||
install(TARGETS ${TARGET} RUNTIME)
|
|
||||||
target_link_libraries(${TARGET} PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT})
|
|
||||||
target_compile_features(${TARGET} PRIVATE cxx_std_17)
|
|
||||||
|
|
||||||
set(TARGET llama-qwen2vl-cli)
|
|
||||||
add_executable(${TARGET} qwen2vl-cli.cpp)
|
|
||||||
set_target_properties(${TARGET} PROPERTIES OUTPUT_NAME llama-qwen2vl-cli)
|
|
||||||
install(TARGETS ${TARGET} RUNTIME)
|
|
||||||
target_link_libraries(${TARGET} PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT})
|
|
||||||
target_compile_features(${TARGET} PRIVATE cxx_std_17)
|
|
||||||
|
|
||||||
set(TARGET llama-gemma3-cli)
|
|
||||||
add_executable(${TARGET} gemma3-cli.cpp)
|
|
||||||
set_target_properties(${TARGET} PROPERTIES OUTPUT_NAME llama-gemma3-cli)
|
|
||||||
install(TARGETS ${TARGET} RUNTIME)
|
|
||||||
target_link_libraries(${TARGET} PRIVATE common mtmd ${CMAKE_THREAD_LIBS_INIT})
|
|
||||||
target_compile_features(${TARGET} PRIVATE cxx_std_17)
|
|
||||||
|
|
||||||
set(TARGET llama-llava-clip-quantize-cli)
|
|
||||||
add_executable(${TARGET} clip-quantize-cli.cpp)
|
|
||||||
set_target_properties(${TARGET} PROPERTIES OUTPUT_NAME llama-llava-clip-quantize-cli)
|
|
||||||
install(TARGETS ${TARGET} RUNTIME)
|
|
||||||
target_link_libraries(${TARGET} PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT})
|
|
||||||
target_compile_features(${TARGET} PRIVATE cxx_std_17)
|
|
@ -1,44 +0,0 @@
|
|||||||
# Quantizing CLIP Visual Projector
|
|
||||||
|
|
||||||
This is the tool for quantizing the CLIP visual projector model. Quantization reduces the precision of the model's weights, which can significantly decrease the model size and improve inference speed, often with minimal impact on performance.
|
|
||||||
|
|
||||||
## Usage
|
|
||||||
|
|
||||||
To quantize a CLIP visual projector model, use the following command:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
./bin/llama-llava-clip-quantize-cli /path/to/ggml-model-f32.gguf /path/to/ggml-model-quantized.gguf <type>
|
|
||||||
```
|
|
||||||
|
|
||||||
After quantization, the visual projector can be used freely with the existing LLaVA CLI tools (LLaVA, Qwen2VL, etc.).
|
|
||||||
|
|
||||||
### Arguments
|
|
||||||
|
|
||||||
- `/path/to/ggml-model-f32.gguf`: The path to the input model file in FP32 or FP16 format.
|
|
||||||
- `/path/to/ggml-model-quantized.gguf`: The path where the quantized model will be saved.
|
|
||||||
- `<type>`: The quantization type to apply. This should be an integer corresponding to one of the quantization types defined in the `enum ggml_type`.
|
|
||||||
|
|
||||||
### Quantization Types
|
|
||||||
|
|
||||||
The following quantization types are supported, based on the `enum ggml_type` definition:
|
|
||||||
|
|
||||||
- `2` - `q4_0`: 4-bit quantization with a single scale value per block.
|
|
||||||
- `3` - `q4_1`: 4-bit quantization with a scale and a minimum value per block.
|
|
||||||
- `6` - `q5_0`: 5-bit quantization with a single scale value per block.
|
|
||||||
- `7` - `q5_1`: 5-bit quantization with a scale and a minimum value per block.
|
|
||||||
- `8` - `q8_0`: 8-bit quantization with a single scale value per block.
|
|
||||||
|
|
||||||
### Example
|
|
||||||
|
|
||||||
To quantize a model using the `q4_0` quantization type, you would run:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
./bin/llama-llava-clip-quantize-cli /path/to/ggml-model-f32.gguf /path/to/ggml-model-quantized.gguf 2
|
|
||||||
```
|
|
||||||
|
|
||||||
This command will generate a quantized model at `/path/to/ggml-model-quantized.gguf` using the `q4_0` quantization method.
|
|
||||||
|
|
||||||
## Notes
|
|
||||||
|
|
||||||
- Quantization can lead to a loss in model accuracy, depending on the chosen quantization type. It is recommended to evaluate the quantized model's performance on your specific task to ensure it meets your requirements.
|
|
||||||
- The quantized model will typically be smaller in size and faster to run, making it more suitable for deployment in resource-constrained environments.
|
|
@ -1,53 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
model_dir="/Users/cxt/model/llm/mobileVLM/MobileVLM-1.7B_processed"
|
|
||||||
projector_name="mmproj-model-f16.gguf"
|
|
||||||
llama_name="ggml-model-q4_k.gguf"
|
|
||||||
img_dir="/Users/cxt/model/llm"
|
|
||||||
img_name="demo.jpg"
|
|
||||||
prompt="A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: <image>\nWho is the author of this book? \nAnswer the question using a single word or phrase. ASSISTANT:"
|
|
||||||
# img_name="cat.jpeg"
|
|
||||||
# prompt="A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: <image>\nWhat is in the image? ASSISTANT:"
|
|
||||||
|
|
||||||
program_dir="build_64/bin"
|
|
||||||
binName="llama-llava-cli"
|
|
||||||
n_threads=4
|
|
||||||
|
|
||||||
|
|
||||||
deviceDir="/data/local/tmp"
|
|
||||||
saveDir="output"
|
|
||||||
if [ ! -d ${saveDir} ]; then
|
|
||||||
mkdir ${saveDir}
|
|
||||||
fi
|
|
||||||
|
|
||||||
|
|
||||||
function android_run() {
|
|
||||||
# # copy resource into device
|
|
||||||
# adb push ${model_dir}/${projector_name} ${deviceDir}/${projector_name}
|
|
||||||
# adb push ${model_dir}/${llama_name} ${deviceDir}/${llama_name}
|
|
||||||
adb push ${img_dir}/${img_name} ${deviceDir}/${img_name}
|
|
||||||
# copy program into device
|
|
||||||
adb push ${program_dir}/${binName} ${deviceDir}/${binName}
|
|
||||||
adb shell "chmod 0777 ${deviceDir}/${binName}"
|
|
||||||
|
|
||||||
# run
|
|
||||||
adb shell "echo cd ${deviceDir} ${deviceDir}/${binName} \
|
|
||||||
-m ${deviceDir}/${llama_name} \
|
|
||||||
--mmproj ${deviceDir}/${projector_name} \
|
|
||||||
-t ${n_threads} \
|
|
||||||
--image ${deviceDir}/${img_name} \
|
|
||||||
-p \"${prompt}\" \
|
|
||||||
> ${deviceDir}/${modelName}_${projector_name}_${n_threads}_${img_name}.txt"
|
|
||||||
adb shell "cd ${deviceDir}; pwd; ${deviceDir}/${binName} \
|
|
||||||
-m ${deviceDir}/${llama_name} \
|
|
||||||
--mmproj ${deviceDir}/${projector_name} \
|
|
||||||
-t ${n_threads} \
|
|
||||||
--image ${deviceDir}/${img_name} \
|
|
||||||
-p \"${prompt}\" \
|
|
||||||
>> ${deviceDir}/${modelName}_${projector_name}_${n_threads}_${img_name}.txt 2>&1"
|
|
||||||
adb pull ${deviceDir}/${modelName}_${projector_name}_${n_threads}_${img_name}.txt ${saveDir}
|
|
||||||
}
|
|
||||||
|
|
||||||
android_run
|
|
||||||
|
|
||||||
echo "android_run is Done!"
|
|
@ -1,8 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
cmake ../../../../ \
|
|
||||||
-DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK/build/cmake/android.toolchain.cmake \
|
|
||||||
-DCMAKE_BUILD_TYPE=Release \
|
|
||||||
-DANDROID_ABI="arm64-v8a" \
|
|
||||||
-DANDROID_PLATFORM=android-23 $1
|
|
||||||
|
|
||||||
make -j4
|
|
@ -1,59 +0,0 @@
|
|||||||
#include "arg.h"
|
|
||||||
#include "base64.hpp"
|
|
||||||
#include "log.h"
|
|
||||||
#include "common.h"
|
|
||||||
#include "sampling.h"
|
|
||||||
#include "clip.h"
|
|
||||||
#include "llava.h"
|
|
||||||
#include "llama.h"
|
|
||||||
#include "ggml.h"
|
|
||||||
|
|
||||||
static void print_usage(int argc, char ** argv) {
|
|
||||||
(void) argc;
|
|
||||||
|
|
||||||
fprintf(stderr, "usage: %s /path/to/ggml-model-f32.gguf /path/to/ggml-model-quantized.gguf type\n", argv[0]);
|
|
||||||
fprintf(stderr, " type = 2 - q4_0\n");
|
|
||||||
fprintf(stderr, " type = 3 - q4_1\n");
|
|
||||||
fprintf(stderr, " type = 6 - q5_0\n");
|
|
||||||
fprintf(stderr, " type = 7 - q5_1\n");
|
|
||||||
fprintf(stderr, " type = 8 - q8_0\n");
|
|
||||||
}
|
|
||||||
|
|
||||||
int main(int argc, char ** argv) {
|
|
||||||
if (argc != 4) {
|
|
||||||
print_usage(argc, argv);
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
const std::string fname_inp = argv[1];
|
|
||||||
const std::string fname_out = argv[2];
|
|
||||||
|
|
||||||
const int itype = atoi(argv[3]);
|
|
||||||
|
|
||||||
const int64_t t_main_start_us = ggml_time_us();
|
|
||||||
|
|
||||||
int64_t t_quantize_us = 0;
|
|
||||||
|
|
||||||
// load the model
|
|
||||||
{
|
|
||||||
const int64_t t_start_us = ggml_time_us();
|
|
||||||
|
|
||||||
if (!clip_model_quantize(fname_inp.c_str(), fname_out.c_str(), itype)) {
|
|
||||||
fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str());
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
t_quantize_us = ggml_time_us() - t_start_us;
|
|
||||||
}
|
|
||||||
|
|
||||||
// report timing
|
|
||||||
{
|
|
||||||
const int64_t t_main_end_us = ggml_time_us();
|
|
||||||
|
|
||||||
printf("\n");
|
|
||||||
printf("%s: quantize time = %8.2f ms\n", __func__, t_quantize_us / 1000.0f);
|
|
||||||
printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us) / 1000.0f);
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
File diff suppressed because it is too large
Load Diff
@ -1,125 +0,0 @@
|
|||||||
#ifndef CLIP_H
|
|
||||||
#define CLIP_H
|
|
||||||
|
|
||||||
#include "ggml.h"
|
|
||||||
#include <stddef.h>
|
|
||||||
#include <stdint.h>
|
|
||||||
|
|
||||||
#ifdef LLAMA_SHARED
|
|
||||||
# if defined(_WIN32) && !defined(__MINGW32__)
|
|
||||||
# ifdef LLAMA_BUILD
|
|
||||||
# define CLIP_API __declspec(dllexport)
|
|
||||||
# else
|
|
||||||
# define CLIP_API __declspec(dllimport)
|
|
||||||
# endif
|
|
||||||
# else
|
|
||||||
# define CLIP_API __attribute__ ((visibility ("default")))
|
|
||||||
# endif
|
|
||||||
#else
|
|
||||||
# define CLIP_API
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#ifdef __cplusplus
|
|
||||||
extern "C" {
|
|
||||||
#endif
|
|
||||||
|
|
||||||
struct clip_ctx;
|
|
||||||
|
|
||||||
struct clip_image_size {
|
|
||||||
int width;
|
|
||||||
int height;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct clip_image_u8_batch;
|
|
||||||
struct clip_image_f32_batch;
|
|
||||||
|
|
||||||
struct clip_context_params {
|
|
||||||
bool use_gpu;
|
|
||||||
ggml_log_level verbosity;
|
|
||||||
};
|
|
||||||
|
|
||||||
// deprecated, use clip_init
|
|
||||||
CLIP_API struct clip_ctx * clip_model_load(const char * fname, int verbosity);
|
|
||||||
|
|
||||||
CLIP_API struct clip_ctx * clip_init(const char * fname, struct clip_context_params ctx_params);
|
|
||||||
|
|
||||||
CLIP_API void clip_free(struct clip_ctx * ctx);
|
|
||||||
|
|
||||||
CLIP_API size_t clip_embd_nbytes(const struct clip_ctx * ctx);
|
|
||||||
CLIP_API size_t clip_embd_nbytes_by_img(const struct clip_ctx * ctx, int img_h, int img_w);
|
|
||||||
|
|
||||||
CLIP_API int32_t clip_get_image_size (const struct clip_ctx * ctx);
|
|
||||||
CLIP_API int32_t clip_get_patch_size (const struct clip_ctx * ctx);
|
|
||||||
CLIP_API int32_t clip_get_hidden_size(const struct clip_ctx * ctx);
|
|
||||||
|
|
||||||
// TODO: should be enum, not string
|
|
||||||
CLIP_API const char * clip_patch_merge_type(const struct clip_ctx * ctx);
|
|
||||||
|
|
||||||
CLIP_API const int32_t * clip_image_grid(const struct clip_ctx * ctx);
|
|
||||||
CLIP_API size_t get_clip_image_grid_size(const struct clip_ctx * ctx);
|
|
||||||
|
|
||||||
CLIP_API int clip_n_patches (const struct clip_ctx * ctx);
|
|
||||||
CLIP_API int clip_n_patches_by_img (const struct clip_ctx * ctx, struct clip_image_f32 * img);
|
|
||||||
CLIP_API int clip_n_mmproj_embd (const struct clip_ctx * ctx);
|
|
||||||
|
|
||||||
CLIP_API int clip_uhd_num_image_embeds_col(struct clip_ctx * ctx_clip);
|
|
||||||
CLIP_API void clip_add_load_image_size(struct clip_ctx * ctx_clip, struct clip_image_size * load_image_size);
|
|
||||||
CLIP_API struct clip_image_size * clip_get_load_image_size(struct clip_ctx * ctx_clip);
|
|
||||||
|
|
||||||
CLIP_API struct clip_image_size * clip_image_size_init();
|
|
||||||
CLIP_API struct clip_image_u8 * clip_image_u8_init ();
|
|
||||||
CLIP_API struct clip_image_f32 * clip_image_f32_init();
|
|
||||||
CLIP_API struct clip_image_f32_batch * clip_image_f32_batch_init(); // only used by libllava
|
|
||||||
|
|
||||||
// nx, ny are the output image dimensions
|
|
||||||
CLIP_API unsigned char * clip_image_u8_get_data(struct clip_image_u8 * img, uint32_t * nx, uint32_t * ny);
|
|
||||||
|
|
||||||
CLIP_API void clip_image_size_free (struct clip_image_size * img_size);
|
|
||||||
CLIP_API void clip_image_u8_free (struct clip_image_u8 * img);
|
|
||||||
CLIP_API void clip_image_f32_free(struct clip_image_f32 * img);
|
|
||||||
CLIP_API void clip_image_u8_batch_free (struct clip_image_u8_batch * batch);
|
|
||||||
CLIP_API void clip_image_f32_batch_free(struct clip_image_f32_batch * batch);
|
|
||||||
|
|
||||||
// use for accessing underlay data of clip_image_f32_batch
|
|
||||||
CLIP_API size_t clip_image_f32_batch_n_images(const struct clip_image_f32_batch * batch); // equivalent to batch->size()
|
|
||||||
CLIP_API size_t clip_image_f32_batch_nx(const struct clip_image_f32_batch * batch, int idx); // equivalent to batch[idx]->nx
|
|
||||||
CLIP_API size_t clip_image_f32_batch_ny(const struct clip_image_f32_batch * batch, int idx); // equivalent to batch[idx]->ny
|
|
||||||
CLIP_API clip_image_f32 * clip_image_f32_get_img(const struct clip_image_f32_batch * batch, int idx); // equivalent to batch[idx]->data
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Build image from pixels decoded by other libraries instead of stb_image.h for better performance.
|
|
||||||
* The memory layout is RGBRGBRGB..., input buffer length must be 3*nx*ny bytes
|
|
||||||
*/
|
|
||||||
CLIP_API void clip_build_img_from_pixels(const unsigned char * rgb_pixels, int nx, int ny, struct clip_image_u8 * img);
|
|
||||||
|
|
||||||
CLIP_API bool clip_image_load_from_file(const char * fname, struct clip_image_u8 * img);
|
|
||||||
|
|
||||||
/** interpret bytes as an image file with length bytes_length, and use the result to populate img */
|
|
||||||
CLIP_API bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img);
|
|
||||||
|
|
||||||
/** preprocess img and store the result in res_imgs, pad_to_square may be overridden to false depending on model configuration */
|
|
||||||
CLIP_API bool clip_image_preprocess(struct clip_ctx * ctx, const struct clip_image_u8 * img, struct clip_image_f32_batch * res_imgs );
|
|
||||||
|
|
||||||
CLIP_API struct ggml_tensor * clip_get_newline_tensor(const struct clip_ctx * ctx);
|
|
||||||
|
|
||||||
CLIP_API bool clip_image_encode (struct clip_ctx * ctx, int n_threads, struct clip_image_f32 * img, float * vec);
|
|
||||||
CLIP_API bool clip_image_batch_encode(struct clip_ctx * ctx, int n_threads, const struct clip_image_f32_batch * imgs, float * vec);
|
|
||||||
|
|
||||||
CLIP_API bool clip_model_quantize(const char * fname_inp, const char * fname_out, int itype);
|
|
||||||
|
|
||||||
CLIP_API int clip_is_minicpmv(const struct clip_ctx * ctx);
|
|
||||||
CLIP_API bool clip_is_glm(const struct clip_ctx * ctx);
|
|
||||||
CLIP_API bool clip_is_qwen2vl(const struct clip_ctx * ctx);
|
|
||||||
CLIP_API bool clip_is_llava(const struct clip_ctx * ctx);
|
|
||||||
CLIP_API bool clip_is_gemma3(const struct clip_ctx * ctx);
|
|
||||||
|
|
||||||
CLIP_API int get_deepest_feature_layer(const struct clip_ctx * ctx);
|
|
||||||
|
|
||||||
CLIP_API bool clip_encode_float_image (struct clip_ctx * ctx, int n_threads, float * img, int h, int w, float * vec);
|
|
||||||
|
|
||||||
|
|
||||||
#ifdef __cplusplus
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#endif // CLIP_H
|
|
@ -1,322 +0,0 @@
|
|||||||
#include "arg.h"
|
|
||||||
#include "log.h"
|
|
||||||
#include "common.h"
|
|
||||||
#include "sampling.h"
|
|
||||||
#include "llama.h"
|
|
||||||
#include "ggml.h"
|
|
||||||
#include "console.h"
|
|
||||||
#include "chat.h"
|
|
||||||
#include "mtmd.h"
|
|
||||||
|
|
||||||
#include <vector>
|
|
||||||
#include <limits.h>
|
|
||||||
#include <cinttypes>
|
|
||||||
|
|
||||||
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
|
|
||||||
#include <signal.h>
|
|
||||||
#include <unistd.h>
|
|
||||||
#elif defined (_WIN32)
|
|
||||||
#define WIN32_LEAN_AND_MEAN
|
|
||||||
#ifndef NOMINMAX
|
|
||||||
#define NOMINMAX
|
|
||||||
#endif
|
|
||||||
#include <windows.h>
|
|
||||||
#include <signal.h>
|
|
||||||
#endif
|
|
||||||
|
|
||||||
static bool g_is_generating = false;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Please note that this is NOT a production-ready stuff.
|
|
||||||
* It is a playground for trying Gemma 3 vision capabilities.
|
|
||||||
* For contributors: please keep this code simple and easy to understand.
|
|
||||||
*/
|
|
||||||
|
|
||||||
static void show_additional_info(int /*argc*/, char ** argv) {
|
|
||||||
LOG(
|
|
||||||
"Experimental CLI for using Gemma 3 vision model\n\n"
|
|
||||||
"Usage: %s [options] -m <model> --mmproj <mmproj> --image <image> -p <prompt>\n\n"
|
|
||||||
" -m and --mmproj are required\n"
|
|
||||||
" --image and -p are optional, if NOT provided, the CLI will run in chat mode\n",
|
|
||||||
argv[0]
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
|
|
||||||
static void sigint_handler(int signo) {
|
|
||||||
if (signo == SIGINT) {
|
|
||||||
if (g_is_generating) {
|
|
||||||
g_is_generating = false;
|
|
||||||
} else {
|
|
||||||
console::cleanup();
|
|
||||||
LOG("\nInterrupted by user\n");
|
|
||||||
_exit(130);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
|
|
||||||
struct gemma3_context {
|
|
||||||
mtmd_context_ptr ctx_vision;
|
|
||||||
common_init_result llama_init;
|
|
||||||
|
|
||||||
llama_model * model;
|
|
||||||
llama_context * lctx;
|
|
||||||
const llama_vocab * vocab;
|
|
||||||
llama_batch batch;
|
|
||||||
int n_batch;
|
|
||||||
|
|
||||||
// note: we know that gemma3 template is "linear", meaning each turn is completely separated to another
|
|
||||||
// so here we don't need to keep track of chat history
|
|
||||||
common_chat_templates_ptr tmpls;
|
|
||||||
|
|
||||||
int n_threads = 1;
|
|
||||||
llama_pos n_past = 0;
|
|
||||||
|
|
||||||
gemma3_context(common_params & params) : llama_init(common_init_from_params(params)) {
|
|
||||||
model = llama_init.model.get();
|
|
||||||
lctx = llama_init.context.get();
|
|
||||||
vocab = llama_model_get_vocab(model);
|
|
||||||
n_threads = params.cpuparams.n_threads;
|
|
||||||
batch = llama_batch_init(params.n_batch, 0, 1);
|
|
||||||
n_batch = params.n_batch;
|
|
||||||
tmpls = common_chat_templates_init(model, params.chat_template);
|
|
||||||
init_vision_context(params);
|
|
||||||
}
|
|
||||||
|
|
||||||
void init_vision_context(common_params & params) {
|
|
||||||
const char * clip_path = params.mmproj.path.c_str();
|
|
||||||
ctx_vision.reset(mtmd_init_from_file(clip_path, model, mtmd_context_params{
|
|
||||||
/* use_gpu */ true,
|
|
||||||
/* timings */ true,
|
|
||||||
/* n_threads */ params.cpuparams.n_threads,
|
|
||||||
/* verbosity */ GGML_LOG_LEVEL_INFO,
|
|
||||||
}));
|
|
||||||
if (!ctx_vision.get()) {
|
|
||||||
LOG_ERR("Failed to load vision model from %s\n", clip_path);
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct decode_embd_batch {
|
|
||||||
std::vector<llama_pos> pos;
|
|
||||||
std::vector<int32_t> n_seq_id;
|
|
||||||
std::vector<llama_seq_id> seq_id_0;
|
|
||||||
std::vector<llama_seq_id *> seq_ids;
|
|
||||||
std::vector<int8_t> logits;
|
|
||||||
llama_batch batch;
|
|
||||||
decode_embd_batch(float * embd, int32_t n_tokens, llama_pos pos_0, llama_seq_id seq_id) {
|
|
||||||
pos .resize(n_tokens);
|
|
||||||
n_seq_id.resize(n_tokens);
|
|
||||||
seq_ids .resize(n_tokens + 1);
|
|
||||||
logits .resize(n_tokens);
|
|
||||||
seq_id_0.resize(1);
|
|
||||||
seq_id_0[0] = seq_id;
|
|
||||||
seq_ids [n_tokens] = nullptr;
|
|
||||||
batch = {
|
|
||||||
/*n_tokens =*/ n_tokens,
|
|
||||||
/*tokens =*/ nullptr,
|
|
||||||
/*embd =*/ embd,
|
|
||||||
/*pos =*/ pos.data(),
|
|
||||||
/*n_seq_id =*/ n_seq_id.data(),
|
|
||||||
/*seq_id =*/ seq_ids.data(),
|
|
||||||
/*logits =*/ logits.data(),
|
|
||||||
};
|
|
||||||
for (int i = 0; i < n_tokens; i++) {
|
|
||||||
batch.pos [i] = pos_0 + i;
|
|
||||||
batch.n_seq_id[i] = 1;
|
|
||||||
batch.seq_id [i] = seq_id_0.data();
|
|
||||||
batch.logits [i] = false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
static int generate_response(gemma3_context & ctx, common_sampler * smpl, int n_predict) {
|
|
||||||
for (int i = 0; i < n_predict; i++) {
|
|
||||||
if (i > n_predict || !g_is_generating) {
|
|
||||||
printf("\n");
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
llama_token token_id = common_sampler_sample(smpl, ctx.lctx, -1);
|
|
||||||
common_sampler_accept(smpl, token_id, true);
|
|
||||||
|
|
||||||
if (llama_vocab_is_eog(ctx.vocab, token_id)) {
|
|
||||||
printf("\n");
|
|
||||||
break; // end of generation
|
|
||||||
}
|
|
||||||
|
|
||||||
printf("%s", common_token_to_piece(ctx.lctx, token_id).c_str());
|
|
||||||
fflush(stdout);
|
|
||||||
|
|
||||||
// eval the token
|
|
||||||
common_batch_clear(ctx.batch);
|
|
||||||
common_batch_add(ctx.batch, token_id, ctx.n_past++, {0}, true);
|
|
||||||
if (llama_decode(ctx.lctx, ctx.batch)) {
|
|
||||||
LOG_ERR("failed to decode token\n");
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int eval_message(gemma3_context & ctx, common_chat_msg & msg, std::vector<std::string> & images_fname, bool add_bos = false) {
|
|
||||||
std::vector<mtmd_bitmap> bitmaps;
|
|
||||||
|
|
||||||
common_chat_templates_inputs tmpl_inputs;
|
|
||||||
tmpl_inputs.messages = {msg};
|
|
||||||
tmpl_inputs.add_generation_prompt = true;
|
|
||||||
tmpl_inputs.use_jinja = false; // jinja is buggy here
|
|
||||||
auto formatted_chat = common_chat_templates_apply(ctx.tmpls.get(), tmpl_inputs);
|
|
||||||
LOG_DBG("formatted_chat.prompt: %s\n", formatted_chat.prompt.c_str());
|
|
||||||
|
|
||||||
for (auto & fname : images_fname) {
|
|
||||||
mtmd_bitmap bitmap;
|
|
||||||
if (mtmd_helper_bitmap_init_from_file(fname.c_str(), bitmap)) {
|
|
||||||
LOG_ERR("Unable to load image %s\n", fname.c_str());
|
|
||||||
return 2; // image not found
|
|
||||||
}
|
|
||||||
bitmaps.push_back(std::move(bitmap));
|
|
||||||
}
|
|
||||||
|
|
||||||
mtmd_input_text text;
|
|
||||||
text.text = formatted_chat.prompt;
|
|
||||||
text.add_special = add_bos;
|
|
||||||
text.parse_special = true;
|
|
||||||
mtmd_input_chunks_ptr chunks(mtmd_tokenize(ctx.ctx_vision.get(), text, bitmaps));
|
|
||||||
if (chunks == nullptr) {
|
|
||||||
LOG_ERR("Unable to tokenize prompt\n");
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (mtmd_helper_eval(ctx.ctx_vision.get(), ctx.lctx, chunks.get(), ctx.n_past, 0, ctx.n_batch)) {
|
|
||||||
LOG_ERR("Unable to eval prompt\n");
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx.n_past += mtmd_helper_get_n_tokens(chunks.get());
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int main(int argc, char ** argv) {
|
|
||||||
ggml_time_init();
|
|
||||||
|
|
||||||
common_params params;
|
|
||||||
params.sampling.temp = 0.2; // lower temp by default for better quality
|
|
||||||
|
|
||||||
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_LLAVA, show_additional_info)) {
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
common_init();
|
|
||||||
|
|
||||||
if (params.mmproj.path.empty()) {
|
|
||||||
show_additional_info(argc, argv);
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
gemma3_context ctx(params);
|
|
||||||
printf("%s: %s\n", __func__, params.model.path.c_str());
|
|
||||||
|
|
||||||
bool is_single_turn = !params.prompt.empty() && !params.image.empty();
|
|
||||||
|
|
||||||
struct common_sampler * smpl = common_sampler_init(ctx.model, params.sampling);
|
|
||||||
int n_predict = params.n_predict < 0 ? INT_MAX : params.n_predict;
|
|
||||||
|
|
||||||
// ctrl+C handling
|
|
||||||
{
|
|
||||||
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
|
|
||||||
struct sigaction sigint_action;
|
|
||||||
sigint_action.sa_handler = sigint_handler;
|
|
||||||
sigemptyset (&sigint_action.sa_mask);
|
|
||||||
sigint_action.sa_flags = 0;
|
|
||||||
sigaction(SIGINT, &sigint_action, NULL);
|
|
||||||
#elif defined (_WIN32)
|
|
||||||
auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
|
|
||||||
return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false;
|
|
||||||
};
|
|
||||||
SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
|
|
||||||
#endif
|
|
||||||
}
|
|
||||||
|
|
||||||
if (is_single_turn) {
|
|
||||||
g_is_generating = true;
|
|
||||||
if (params.prompt.find("<__image__>") == std::string::npos) {
|
|
||||||
params.prompt += " <__image__>";
|
|
||||||
}
|
|
||||||
common_chat_msg msg;
|
|
||||||
msg.role = "user";
|
|
||||||
msg.content = params.prompt;
|
|
||||||
if (eval_message(ctx, msg, params.image, true)) {
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
if (generate_response(ctx, smpl, n_predict)) {
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
} else {
|
|
||||||
LOG("\n Running in chat mode, available commands:");
|
|
||||||
LOG("\n /image <path> load an image");
|
|
||||||
LOG("\n /clear clear the chat history");
|
|
||||||
LOG("\n /quit or /exit exit the program");
|
|
||||||
LOG("\n");
|
|
||||||
|
|
||||||
bool is_first_msg = true;
|
|
||||||
std::vector<std::string> images_fname;
|
|
||||||
std::string content;
|
|
||||||
|
|
||||||
while (true) {
|
|
||||||
g_is_generating = false;
|
|
||||||
LOG("\n> ");
|
|
||||||
console::set_display(console::user_input);
|
|
||||||
std::string line;
|
|
||||||
console::readline(line, false);
|
|
||||||
console::set_display(console::reset);
|
|
||||||
line = string_strip(line);
|
|
||||||
if (line.empty()) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
if (line == "/quit" || line == "/exit") {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
if (line == "/clear") {
|
|
||||||
ctx.n_past = 0;
|
|
||||||
llama_kv_self_seq_rm(ctx.lctx, 0, 1, -1); // keep BOS
|
|
||||||
LOG("Chat history cleared\n\n");
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
g_is_generating = true;
|
|
||||||
if (line.find("/image") == 0) {
|
|
||||||
std::string image = line.substr(7);
|
|
||||||
images_fname.push_back(string_strip(image));
|
|
||||||
content += "<__image__>";
|
|
||||||
continue;
|
|
||||||
} else {
|
|
||||||
content += line;
|
|
||||||
}
|
|
||||||
common_chat_msg msg;
|
|
||||||
msg.role = "user";
|
|
||||||
msg.content = content;
|
|
||||||
int ret = eval_message(ctx, msg, images_fname, is_first_msg);
|
|
||||||
if (ret == 2) {
|
|
||||||
// non-fatal error
|
|
||||||
images_fname.clear();
|
|
||||||
content.clear();
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
if (ret) {
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
if (generate_response(ctx, smpl, n_predict)) {
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
images_fname.clear();
|
|
||||||
content.clear();
|
|
||||||
is_first_msg = false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
@ -1,307 +0,0 @@
|
|||||||
import gguf
|
|
||||||
import argparse
|
|
||||||
import logging
|
|
||||||
import sys
|
|
||||||
import torch
|
|
||||||
import json
|
|
||||||
import os
|
|
||||||
import numpy as np
|
|
||||||
from typing import cast, ContextManager, Any, Iterator
|
|
||||||
from pathlib import Path
|
|
||||||
from torch import Tensor
|
|
||||||
|
|
||||||
logger = logging.getLogger("gemma3-mmproj")
|
|
||||||
|
|
||||||
|
|
||||||
# (copied from convert_hf_to_gguf.py)
|
|
||||||
# tree of lazy tensors
|
|
||||||
class LazyTorchTensor(gguf.LazyBase):
|
|
||||||
_tensor_type = torch.Tensor
|
|
||||||
# to keep the type-checker happy
|
|
||||||
dtype: torch.dtype
|
|
||||||
shape: torch.Size
|
|
||||||
|
|
||||||
# only used when converting a torch.Tensor to a np.ndarray
|
|
||||||
_dtype_map: dict[torch.dtype, type] = {
|
|
||||||
torch.float16: np.float16,
|
|
||||||
torch.float32: np.float32,
|
|
||||||
}
|
|
||||||
|
|
||||||
# used for safetensors slices
|
|
||||||
# ref: https://github.com/huggingface/safetensors/blob/079781fd0dc455ba0fe851e2b4507c33d0c0d407/bindings/python/src/lib.rs#L1046
|
|
||||||
# TODO: uncomment U64, U32, and U16, ref: https://github.com/pytorch/pytorch/issues/58734
|
|
||||||
_dtype_str_map: dict[str, torch.dtype] = {
|
|
||||||
"F64": torch.float64,
|
|
||||||
"F32": torch.float32,
|
|
||||||
"BF16": torch.bfloat16,
|
|
||||||
"F16": torch.float16,
|
|
||||||
# "U64": torch.uint64,
|
|
||||||
"I64": torch.int64,
|
|
||||||
# "U32": torch.uint32,
|
|
||||||
"I32": torch.int32,
|
|
||||||
# "U16": torch.uint16,
|
|
||||||
"I16": torch.int16,
|
|
||||||
"U8": torch.uint8,
|
|
||||||
"I8": torch.int8,
|
|
||||||
"BOOL": torch.bool,
|
|
||||||
"F8_E4M3": torch.float8_e4m3fn,
|
|
||||||
"F8_E5M2": torch.float8_e5m2,
|
|
||||||
}
|
|
||||||
|
|
||||||
def numpy(self) -> gguf.LazyNumpyTensor:
|
|
||||||
dtype = self._dtype_map[self.dtype]
|
|
||||||
return gguf.LazyNumpyTensor(
|
|
||||||
meta=gguf.LazyNumpyTensor.meta_with_dtype_and_shape(dtype, self.shape),
|
|
||||||
args=(self,),
|
|
||||||
func=(lambda s: s.numpy())
|
|
||||||
)
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def meta_with_dtype_and_shape(cls, dtype: torch.dtype, shape: tuple[int, ...]) -> Tensor:
|
|
||||||
return torch.empty(size=shape, dtype=dtype, device="meta")
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def from_safetensors_slice(cls, st_slice: Any) -> Tensor:
|
|
||||||
dtype = cls._dtype_str_map[st_slice.get_dtype()]
|
|
||||||
shape: tuple[int, ...] = tuple(st_slice.get_shape())
|
|
||||||
lazy = cls(meta=cls.meta_with_dtype_and_shape(dtype, shape), args=(st_slice,), func=lambda s: s[:])
|
|
||||||
return cast(torch.Tensor, lazy)
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def __torch_function__(cls, func, types, args=(), kwargs=None):
|
|
||||||
del types # unused
|
|
||||||
|
|
||||||
if kwargs is None:
|
|
||||||
kwargs = {}
|
|
||||||
|
|
||||||
if func is torch.Tensor.numpy:
|
|
||||||
return args[0].numpy()
|
|
||||||
|
|
||||||
return cls._wrap_fn(func)(*args, **kwargs)
|
|
||||||
|
|
||||||
|
|
||||||
class Gemma3VisionTower:
|
|
||||||
hparams: dict
|
|
||||||
gguf_writer: gguf.GGUFWriter
|
|
||||||
fname_out: Path
|
|
||||||
ftype: gguf.LlamaFileType
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def load_hparams(dir_model: Path):
|
|
||||||
with open(dir_model / "config.json", "r", encoding="utf-8") as f:
|
|
||||||
return json.load(f)
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def get_model_part_names(dir_model: Path, prefix: str, suffix: str) -> list[str]:
|
|
||||||
part_names: list[str] = []
|
|
||||||
for filename in os.listdir(dir_model):
|
|
||||||
if filename.startswith(prefix) and filename.endswith(suffix):
|
|
||||||
part_names.append(filename)
|
|
||||||
part_names.sort()
|
|
||||||
return part_names
|
|
||||||
|
|
||||||
def __init__(self,
|
|
||||||
dir_model: Path,
|
|
||||||
fname_out: Path,
|
|
||||||
ftype: gguf.LlamaFileType,
|
|
||||||
is_big_endian: bool,):
|
|
||||||
hparams = Gemma3VisionTower.load_hparams(dir_model)
|
|
||||||
self.hparams = hparams
|
|
||||||
self.fname_out = fname_out
|
|
||||||
self.ftype = ftype
|
|
||||||
endianess = gguf.GGUFEndian.BIG if is_big_endian else gguf.GGUFEndian.LITTLE
|
|
||||||
self.gguf_writer = gguf.GGUFWriter(path=None, arch="clip", endianess=endianess)
|
|
||||||
|
|
||||||
text_config = hparams["text_config"]
|
|
||||||
vision_config = hparams["vision_config"]
|
|
||||||
|
|
||||||
assert hparams["architectures"][0] == "Gemma3ForConditionalGeneration"
|
|
||||||
assert text_config is not None
|
|
||||||
assert vision_config is not None
|
|
||||||
|
|
||||||
self.gguf_writer.add_string ("clip.projector_type", "gemma3")
|
|
||||||
self.gguf_writer.add_bool ("clip.has_text_encoder", False)
|
|
||||||
self.gguf_writer.add_bool ("clip.has_vision_encoder", True)
|
|
||||||
self.gguf_writer.add_bool ("clip.has_llava_projector", False) # legacy
|
|
||||||
self.gguf_writer.add_uint32 ("clip.vision.image_size", vision_config["image_size"])
|
|
||||||
self.gguf_writer.add_uint32 ("clip.vision.patch_size", vision_config["patch_size"])
|
|
||||||
self.gguf_writer.add_uint32 ("clip.vision.embedding_length", vision_config["hidden_size"])
|
|
||||||
self.gguf_writer.add_uint32 ("clip.vision.feed_forward_length", vision_config["intermediate_size"])
|
|
||||||
self.gguf_writer.add_uint32 ("clip.vision.projection_dim", text_config["hidden_size"])
|
|
||||||
self.gguf_writer.add_uint32 ("clip.vision.block_count", vision_config["num_hidden_layers"])
|
|
||||||
self.gguf_writer.add_uint32 ("clip.vision.attention.head_count", vision_config["num_attention_heads"])
|
|
||||||
self.gguf_writer.add_float32("clip.vision.attention.layer_norm_epsilon", vision_config.get("layer_norm_eps", 1e-6))
|
|
||||||
# default values taken from HF tranformers code
|
|
||||||
self.gguf_writer.add_array ("clip.vision.image_mean", [0.5, 0.5, 0.5])
|
|
||||||
self.gguf_writer.add_array ("clip.vision.image_std", [0.5, 0.5, 0.5])
|
|
||||||
self.gguf_writer.add_bool ("clip.use_gelu", True)
|
|
||||||
|
|
||||||
# load tensors
|
|
||||||
for name, data_torch in self.get_tensors(dir_model):
|
|
||||||
# convert any unsupported data types to float32
|
|
||||||
if data_torch.dtype not in (torch.float16, torch.float32):
|
|
||||||
data_torch = data_torch.to(torch.float32)
|
|
||||||
self.add_tensor(name, data_torch)
|
|
||||||
|
|
||||||
def get_tensors(self, dir_model: Path) -> Iterator[tuple[str, Tensor]]:
|
|
||||||
part_names = Gemma3VisionTower.get_model_part_names(dir_model, "model", ".safetensors")
|
|
||||||
tensor_names_from_parts: set[str] = set()
|
|
||||||
for part_name in part_names:
|
|
||||||
logger.info(f"gguf: loading model part '{part_name}'")
|
|
||||||
from safetensors import safe_open
|
|
||||||
ctx = cast(ContextManager[Any], safe_open(dir_model / part_name, framework="pt", device="cpu"))
|
|
||||||
with ctx as model_part:
|
|
||||||
tensor_names_from_parts.update(model_part.keys())
|
|
||||||
|
|
||||||
for name in model_part.keys():
|
|
||||||
data = model_part.get_slice(name)
|
|
||||||
data = LazyTorchTensor.from_safetensors_slice(data)
|
|
||||||
yield name, data
|
|
||||||
|
|
||||||
def add_tensor(self, name: str, data_torch: Tensor):
|
|
||||||
is_1d = len(data_torch.shape) == 1
|
|
||||||
is_embd = ".embeddings." in name
|
|
||||||
old_dtype = data_torch.dtype
|
|
||||||
can_quantize = not is_1d and not is_embd
|
|
||||||
data_qtype = gguf.GGMLQuantizationType.F32
|
|
||||||
|
|
||||||
# this is to support old checkpoint
|
|
||||||
# TODO: remove this when we have the final model
|
|
||||||
name = name.replace("vision_model.vision_model.", "vision_tower.vision_model.")
|
|
||||||
name = name.replace("multimodal_projector.", "multi_modal_projector.")
|
|
||||||
|
|
||||||
# filter only vision tensors
|
|
||||||
if not name.startswith("vision_tower.vision_model.") and not name.startswith("multi_modal_projector."):
|
|
||||||
return
|
|
||||||
# prefix
|
|
||||||
name = name.replace("vision_tower.vision_model.encoder.layers.", "v.blk.")
|
|
||||||
name = name.replace("vision_tower.vision_model.", "v.")
|
|
||||||
# projector and input embd
|
|
||||||
name = name.replace(".embeddings.patch_embedding.", ".patch_embd.")
|
|
||||||
name = name.replace(".embeddings.position_embedding.", ".position_embd.")
|
|
||||||
name = name.replace(
|
|
||||||
"multi_modal_projector.mm_input_projection_weight",
|
|
||||||
"mm.input_projection.weight"
|
|
||||||
)
|
|
||||||
name = name.replace(
|
|
||||||
"multi_modal_projector.mm_soft_emb_norm.weight",
|
|
||||||
"mm.soft_emb_norm.weight"
|
|
||||||
)
|
|
||||||
name = name.replace("post_layernorm.", "post_ln.")
|
|
||||||
# each block
|
|
||||||
name = name.replace(".self_attn.k_proj.", ".attn_k.")
|
|
||||||
name = name.replace(".self_attn.v_proj.", ".attn_v.")
|
|
||||||
name = name.replace(".self_attn.q_proj.", ".attn_q.")
|
|
||||||
name = name.replace(".self_attn.out_proj.", ".attn_out.")
|
|
||||||
name = name.replace(".layer_norm1.", ".ln1.")
|
|
||||||
name = name.replace(".layer_norm2.", ".ln2.")
|
|
||||||
name = name.replace(".mlp.fc1.", ".ffn_down.")
|
|
||||||
name = name.replace(".mlp.fc2.", ".ffn_up.")
|
|
||||||
|
|
||||||
if can_quantize:
|
|
||||||
if self.ftype == gguf.LlamaFileType.ALL_F32:
|
|
||||||
data_qtype = gguf.GGMLQuantizationType.F32
|
|
||||||
elif self.ftype == gguf.LlamaFileType.MOSTLY_F16:
|
|
||||||
data_qtype = gguf.GGMLQuantizationType.F16
|
|
||||||
elif self.ftype == gguf.LlamaFileType.MOSTLY_BF16:
|
|
||||||
data_qtype = gguf.GGMLQuantizationType.BF16
|
|
||||||
elif self.ftype == gguf.LlamaFileType.MOSTLY_Q8_0:
|
|
||||||
data_qtype = gguf.GGMLQuantizationType.Q8_0
|
|
||||||
else:
|
|
||||||
raise ValueError(f"Unsupported file type: {self.ftype}")
|
|
||||||
|
|
||||||
# corrent norm value ; only this "soft_emb_norm" need to be corrected as it's part of Gemma projector
|
|
||||||
# the other norm values are part of SigLIP model, and they are already correct
|
|
||||||
# ref code: Gemma3RMSNorm
|
|
||||||
if "soft_emb_norm.weight" in name:
|
|
||||||
logger.info(f"Correcting norm value for '{name}'")
|
|
||||||
data_torch = data_torch + 1
|
|
||||||
|
|
||||||
data = data_torch.numpy()
|
|
||||||
|
|
||||||
try:
|
|
||||||
data = gguf.quants.quantize(data, data_qtype)
|
|
||||||
except Exception as e:
|
|
||||||
logger.error(f"Error quantizing tensor '{name}': {e}, fallback to F16")
|
|
||||||
data_qtype = gguf.GGMLQuantizationType.F16
|
|
||||||
data = gguf.quants.quantize(data, data_qtype)
|
|
||||||
|
|
||||||
# reverse shape to make it similar to the internal ggml dimension order
|
|
||||||
shape_str = f"{{{', '.join(str(n) for n in reversed(data_torch.shape))}}}"
|
|
||||||
logger.info(f"{f'%-32s' % f'{name},'} {old_dtype} --> {data_qtype.name}, shape = {shape_str}")
|
|
||||||
|
|
||||||
self.gguf_writer.add_tensor(name, data, raw_dtype=data_qtype)
|
|
||||||
|
|
||||||
def write(self):
|
|
||||||
self.gguf_writer.write_header_to_file(path=self.fname_out)
|
|
||||||
self.gguf_writer.write_kv_data_to_file()
|
|
||||||
self.gguf_writer.write_tensors_to_file(progress=True)
|
|
||||||
self.gguf_writer.close()
|
|
||||||
|
|
||||||
def parse_args() -> argparse.Namespace:
|
|
||||||
parser = argparse.ArgumentParser(
|
|
||||||
description="Convert Gemma 3 vision tower safetensors to GGUF format",)
|
|
||||||
parser.add_argument(
|
|
||||||
"--outfile", type=Path, default="mmproj.gguf",
|
|
||||||
help="path to write to",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--outtype", type=str, choices=["f32", "f16", "bf16", "q8_0"], default="f16",
|
|
||||||
help="output format",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--bigendian", action="store_true",
|
|
||||||
help="model is executed on big endian machine",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"model", type=Path,
|
|
||||||
help="directory containing model file",
|
|
||||||
nargs="?",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--verbose", action="store_true",
|
|
||||||
help="increase output verbosity",
|
|
||||||
)
|
|
||||||
|
|
||||||
args = parser.parse_args()
|
|
||||||
if args.model is None:
|
|
||||||
parser.error("the following arguments are required: model")
|
|
||||||
return args
|
|
||||||
|
|
||||||
|
|
||||||
def main() -> None:
|
|
||||||
args = parse_args()
|
|
||||||
|
|
||||||
if args.verbose:
|
|
||||||
logging.basicConfig(level=logging.DEBUG)
|
|
||||||
else:
|
|
||||||
logging.basicConfig(level=logging.INFO)
|
|
||||||
|
|
||||||
dir_model = args.model
|
|
||||||
|
|
||||||
if not dir_model.is_dir():
|
|
||||||
logger.error(f'Error: {args.model} is not a directory')
|
|
||||||
sys.exit(1)
|
|
||||||
|
|
||||||
ftype_map: dict[str, gguf.LlamaFileType] = {
|
|
||||||
"f32": gguf.LlamaFileType.ALL_F32,
|
|
||||||
"f16": gguf.LlamaFileType.MOSTLY_F16,
|
|
||||||
"bf16": gguf.LlamaFileType.MOSTLY_BF16,
|
|
||||||
"q8_0": gguf.LlamaFileType.MOSTLY_Q8_0,
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.info(f"Loading model: {dir_model.name}")
|
|
||||||
|
|
||||||
with torch.inference_mode():
|
|
||||||
gemma3_vision_tower = Gemma3VisionTower(
|
|
||||||
dir_model=dir_model,
|
|
||||||
fname_out=args.outfile,
|
|
||||||
ftype=ftype_map[args.outtype],
|
|
||||||
is_big_endian=args.bigendian,
|
|
||||||
)
|
|
||||||
gemma3_vision_tower.write()
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
main()
|
|
||||||
|
|
@ -1,332 +0,0 @@
|
|||||||
#include "arg.h"
|
|
||||||
#include "base64.hpp"
|
|
||||||
#include "log.h"
|
|
||||||
#include "common.h"
|
|
||||||
#include "sampling.h"
|
|
||||||
#include "clip.h"
|
|
||||||
#include "llava.h"
|
|
||||||
#include "llama.h"
|
|
||||||
#include "ggml.h"
|
|
||||||
|
|
||||||
#include <cstdio>
|
|
||||||
#include <cstdlib>
|
|
||||||
#include <cstring>
|
|
||||||
#include <vector>
|
|
||||||
|
|
||||||
static bool eval_tokens(struct llama_context * ctx_llama, std::vector<llama_token> tokens, int n_batch, int * n_past) {
|
|
||||||
int N = (int) tokens.size();
|
|
||||||
for (int i = 0; i < N; i += n_batch) {
|
|
||||||
int n_eval = (int) tokens.size() - i;
|
|
||||||
if (n_eval > n_batch) {
|
|
||||||
n_eval = n_batch;
|
|
||||||
}
|
|
||||||
if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval))) {
|
|
||||||
LOG_ERR("%s : failed to eval. token %d/%d (batch size %d, n_past %d)\n", __func__, i, N, n_batch, *n_past);
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
*n_past += n_eval;
|
|
||||||
}
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
static bool eval_id(struct llama_context * ctx_llama, int id, int * n_past) {
|
|
||||||
std::vector<llama_token> tokens;
|
|
||||||
tokens.push_back(id);
|
|
||||||
return eval_tokens(ctx_llama, tokens, 1, n_past);
|
|
||||||
}
|
|
||||||
|
|
||||||
static bool eval_string(struct llama_context * ctx_llama, const char* str, int n_batch, int * n_past, bool add_bos){
|
|
||||||
std::string str2 = str;
|
|
||||||
std::vector<llama_token> embd_inp = common_tokenize(ctx_llama, str2, add_bos, true);
|
|
||||||
eval_tokens(ctx_llama, embd_inp, n_batch, n_past);
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
static const char * sample(struct common_sampler * smpl,
|
|
||||||
struct llama_context * ctx_llama,
|
|
||||||
int * n_past) {
|
|
||||||
const llama_token id = common_sampler_sample(smpl, ctx_llama, -1);
|
|
||||||
common_sampler_accept(smpl, id, true);
|
|
||||||
|
|
||||||
const llama_model * model = llama_get_model(ctx_llama);
|
|
||||||
const llama_vocab * vocab = llama_model_get_vocab(model);
|
|
||||||
|
|
||||||
static std::string ret;
|
|
||||||
if (llama_vocab_is_eog(vocab, id)) {
|
|
||||||
ret = "</s>";
|
|
||||||
} else {
|
|
||||||
ret = common_token_to_piece(ctx_llama, id);
|
|
||||||
}
|
|
||||||
eval_id(ctx_llama, id, n_past);
|
|
||||||
return ret.c_str();
|
|
||||||
}
|
|
||||||
|
|
||||||
static const char* IMG_BASE64_TAG_BEGIN = "<img src=\"data:image/jpeg;base64,";
|
|
||||||
static const char* IMG_BASE64_TAG_END = "\">";
|
|
||||||
|
|
||||||
static void find_image_tag_in_prompt(const std::string& prompt, size_t& begin_out, size_t& end_out) {
|
|
||||||
begin_out = prompt.find(IMG_BASE64_TAG_BEGIN);
|
|
||||||
end_out = prompt.find(IMG_BASE64_TAG_END, (begin_out == std::string::npos) ? 0UL : begin_out);
|
|
||||||
}
|
|
||||||
|
|
||||||
static bool prompt_contains_image(const std::string& prompt) {
|
|
||||||
size_t begin, end;
|
|
||||||
find_image_tag_in_prompt(prompt, begin, end);
|
|
||||||
return (begin != std::string::npos);
|
|
||||||
}
|
|
||||||
|
|
||||||
// replaces the base64 image tag in the prompt with `replacement`
|
|
||||||
static llava_image_embed * llava_image_embed_make_with_prompt_base64(struct clip_ctx * ctx_clip, int n_threads, const std::string& prompt) {
|
|
||||||
size_t img_base64_str_start, img_base64_str_end;
|
|
||||||
find_image_tag_in_prompt(prompt, img_base64_str_start, img_base64_str_end);
|
|
||||||
if (img_base64_str_start == std::string::npos || img_base64_str_end == std::string::npos) {
|
|
||||||
LOG_ERR("%s: invalid base64 image tag. must be %s<base64 byte string>%s\n", __func__, IMG_BASE64_TAG_BEGIN, IMG_BASE64_TAG_END);
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
auto base64_bytes_start = img_base64_str_start + strlen(IMG_BASE64_TAG_BEGIN);
|
|
||||||
auto base64_bytes_count = img_base64_str_end - base64_bytes_start;
|
|
||||||
auto base64_str = prompt.substr(base64_bytes_start, base64_bytes_count );
|
|
||||||
|
|
||||||
auto required_bytes = base64::required_encode_size(base64_str.size());
|
|
||||||
auto img_bytes = std::vector<unsigned char>(required_bytes);
|
|
||||||
base64::decode(base64_str.begin(), base64_str.end(), img_bytes.begin());
|
|
||||||
|
|
||||||
auto embed = llava_image_embed_make_with_bytes(ctx_clip, n_threads, img_bytes.data(), img_bytes.size());
|
|
||||||
if (!embed) {
|
|
||||||
LOG_ERR("%s: could not load image from base64 string.\n", __func__);
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
return embed;
|
|
||||||
}
|
|
||||||
|
|
||||||
static std::string remove_image_from_prompt(const std::string& prompt, const char * replacement = "") {
|
|
||||||
size_t begin, end;
|
|
||||||
find_image_tag_in_prompt(prompt, begin, end);
|
|
||||||
if (begin == std::string::npos || end == std::string::npos) {
|
|
||||||
return prompt;
|
|
||||||
}
|
|
||||||
auto pre = prompt.substr(0, begin);
|
|
||||||
auto post = prompt.substr(end + strlen(IMG_BASE64_TAG_END));
|
|
||||||
return pre + replacement + post;
|
|
||||||
}
|
|
||||||
|
|
||||||
struct llava_context {
|
|
||||||
struct clip_ctx * ctx_clip = NULL;
|
|
||||||
struct llama_context * ctx_llama = NULL;
|
|
||||||
struct llama_model * model = NULL;
|
|
||||||
};
|
|
||||||
|
|
||||||
static void print_usage(int, char ** argv) {
|
|
||||||
LOG("\n example usage:\n");
|
|
||||||
LOG("\n %s -m <llava-v1.5-7b/ggml-model-q5_k.gguf> --mmproj <llava-v1.5-7b/mmproj-model-f16.gguf> --image <path/to/an/image.jpg> --image <path/to/another/image.jpg> [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]);
|
|
||||||
LOG("\n note: a lower temperature value like 0.1 is recommended for better quality.\n");
|
|
||||||
}
|
|
||||||
|
|
||||||
static struct llava_image_embed * load_image(llava_context * ctx_llava, common_params * params, const std::string & fname) {
|
|
||||||
|
|
||||||
// load and preprocess the image
|
|
||||||
llava_image_embed * embed = NULL;
|
|
||||||
auto prompt = params->prompt;
|
|
||||||
if (prompt_contains_image(prompt)) {
|
|
||||||
if (!params->image.empty()) {
|
|
||||||
LOG_INF("using base64 encoded image instead of command line image path\n");
|
|
||||||
}
|
|
||||||
embed = llava_image_embed_make_with_prompt_base64(ctx_llava->ctx_clip, params->cpuparams.n_threads, prompt);
|
|
||||||
if (!embed) {
|
|
||||||
LOG_ERR("%s: can't load image from prompt\n", __func__);
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
params->prompt = remove_image_from_prompt(prompt);
|
|
||||||
} else {
|
|
||||||
embed = llava_image_embed_make_with_filename(ctx_llava->ctx_clip, params->cpuparams.n_threads, fname.c_str());
|
|
||||||
if (!embed) {
|
|
||||||
fprintf(stderr, "%s: is %s really an image file?\n", __func__, fname.c_str());
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return embed;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void process_prompt(struct llava_context * ctx_llava, struct llava_image_embed * image_embed, common_params * params, const std::string & prompt) {
|
|
||||||
int n_past = 0;
|
|
||||||
|
|
||||||
const int max_tgt_len = params->n_predict < 0 ? 256 : params->n_predict;
|
|
||||||
|
|
||||||
std::string system_prompt, user_prompt;
|
|
||||||
size_t image_pos = prompt.find("<image>");
|
|
||||||
if (image_pos != std::string::npos) {
|
|
||||||
// new templating mode: Provide the full prompt including system message and use <image> as a placeholder for the image
|
|
||||||
system_prompt = prompt.substr(0, image_pos);
|
|
||||||
user_prompt = prompt.substr(image_pos + std::string("<image>").length());
|
|
||||||
LOG_INF("system_prompt: %s\n", system_prompt.c_str());
|
|
||||||
if (params->verbose_prompt) {
|
|
||||||
auto tmp = common_tokenize(ctx_llava->ctx_llama, system_prompt, true, true);
|
|
||||||
for (int i = 0; i < (int) tmp.size(); i++) {
|
|
||||||
LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
LOG_INF("user_prompt: %s\n", user_prompt.c_str());
|
|
||||||
if (params->verbose_prompt) {
|
|
||||||
auto tmp = common_tokenize(ctx_llava->ctx_llama, user_prompt, true, true);
|
|
||||||
for (int i = 0; i < (int) tmp.size(); i++) {
|
|
||||||
LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// llava-1.5 native mode
|
|
||||||
system_prompt = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\nUSER:";
|
|
||||||
user_prompt = prompt + "\nASSISTANT:";
|
|
||||||
if (params->verbose_prompt) {
|
|
||||||
auto tmp = common_tokenize(ctx_llava->ctx_llama, user_prompt, true, true);
|
|
||||||
for (int i = 0; i < (int) tmp.size(); i++) {
|
|
||||||
LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
eval_string(ctx_llava->ctx_llama, system_prompt.c_str(), params->n_batch, &n_past, true);
|
|
||||||
llava_eval_image_embed(ctx_llava->ctx_llama, image_embed, params->n_batch, &n_past);
|
|
||||||
eval_string(ctx_llava->ctx_llama, user_prompt.c_str(), params->n_batch, &n_past, false);
|
|
||||||
|
|
||||||
// generate the response
|
|
||||||
|
|
||||||
LOG("\n");
|
|
||||||
|
|
||||||
struct common_sampler * smpl = common_sampler_init(ctx_llava->model, params->sampling);
|
|
||||||
if (!smpl) {
|
|
||||||
LOG_ERR("%s: failed to initialize sampling subsystem\n", __func__);
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
std::string response = "";
|
|
||||||
for (int i = 0; i < max_tgt_len; i++) {
|
|
||||||
const char * tmp = sample(smpl, ctx_llava->ctx_llama, &n_past);
|
|
||||||
response += tmp;
|
|
||||||
if (strcmp(tmp, "</s>") == 0) break;
|
|
||||||
if (strstr(tmp, "###")) break; // Yi-VL behavior
|
|
||||||
LOG("%s", tmp);
|
|
||||||
if (strstr(response.c_str(), "<|im_end|>")) break; // Yi-34B llava-1.6 - for some reason those decode not as the correct token (tokenizer works)
|
|
||||||
if (strstr(response.c_str(), "<|im_start|>")) break; // Yi-34B llava-1.6
|
|
||||||
if (strstr(response.c_str(), "USER:")) break; // mistral llava-1.6
|
|
||||||
|
|
||||||
fflush(stdout);
|
|
||||||
}
|
|
||||||
|
|
||||||
common_sampler_free(smpl);
|
|
||||||
LOG("\n");
|
|
||||||
}
|
|
||||||
|
|
||||||
static struct llama_model * llava_init(common_params * params) {
|
|
||||||
llama_backend_init();
|
|
||||||
llama_numa_init(params->numa);
|
|
||||||
|
|
||||||
llama_model_params model_params = common_model_params_to_llama(*params);
|
|
||||||
|
|
||||||
llama_model * model = llama_model_load_from_file(params->model.path.c_str(), model_params);
|
|
||||||
if (model == NULL) {
|
|
||||||
LOG_ERR("%s: unable to load model\n" , __func__);
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
return model;
|
|
||||||
}
|
|
||||||
|
|
||||||
static struct llava_context * llava_init_context(common_params * params, llama_model * model) {
|
|
||||||
const char * clip_path = params->mmproj.path.c_str();
|
|
||||||
|
|
||||||
auto prompt = params->prompt;
|
|
||||||
if (prompt.empty()) {
|
|
||||||
prompt = "describe the image in detail.";
|
|
||||||
}
|
|
||||||
|
|
||||||
auto ctx_clip = clip_model_load(clip_path, GGML_LOG_LEVEL_INFO);
|
|
||||||
|
|
||||||
llama_context_params ctx_params = common_context_params_to_llama(*params);
|
|
||||||
ctx_params.n_ctx = params->n_ctx < 2048 ? 2048 : params->n_ctx; // we need a longer context size to process image embeddings
|
|
||||||
|
|
||||||
llama_context * ctx_llama = llama_init_from_model(model, ctx_params);
|
|
||||||
|
|
||||||
if (ctx_llama == NULL) {
|
|
||||||
LOG_ERR("%s: failed to create the llama_context\n" , __func__);
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
auto * ctx_llava = (struct llava_context *)malloc(sizeof(llava_context));
|
|
||||||
|
|
||||||
ctx_llava->ctx_llama = ctx_llama;
|
|
||||||
ctx_llava->ctx_clip = ctx_clip;
|
|
||||||
ctx_llava->model = model;
|
|
||||||
return ctx_llava;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void llava_free(struct llava_context * ctx_llava) {
|
|
||||||
if (ctx_llava->ctx_clip) {
|
|
||||||
clip_free(ctx_llava->ctx_clip);
|
|
||||||
ctx_llava->ctx_clip = NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
llama_free(ctx_llava->ctx_llama);
|
|
||||||
llama_model_free(ctx_llava->model);
|
|
||||||
llama_backend_free();
|
|
||||||
}
|
|
||||||
|
|
||||||
int main(int argc, char ** argv) {
|
|
||||||
ggml_time_init();
|
|
||||||
|
|
||||||
common_params params;
|
|
||||||
|
|
||||||
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_LLAVA, print_usage)) {
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
common_init();
|
|
||||||
|
|
||||||
if (params.mmproj.path.empty() || (params.image.empty() && !prompt_contains_image(params.prompt))) {
|
|
||||||
print_usage(argc, argv);
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
auto * model = llava_init(¶ms);
|
|
||||||
if (model == NULL) {
|
|
||||||
fprintf(stderr, "%s: error: failed to init llava model\n", __func__);
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (prompt_contains_image(params.prompt)) {
|
|
||||||
auto * ctx_llava = llava_init_context(¶ms, model);
|
|
||||||
|
|
||||||
auto * image_embed = load_image(ctx_llava, ¶ms, "");
|
|
||||||
|
|
||||||
// process the prompt
|
|
||||||
process_prompt(ctx_llava, image_embed, ¶ms, params.prompt);
|
|
||||||
|
|
||||||
llama_perf_context_print(ctx_llava->ctx_llama);
|
|
||||||
llava_image_embed_free(image_embed);
|
|
||||||
ctx_llava->model = NULL;
|
|
||||||
llava_free(ctx_llava);
|
|
||||||
} else {
|
|
||||||
for (auto & image : params.image) {
|
|
||||||
auto * ctx_llava = llava_init_context(¶ms, model);
|
|
||||||
|
|
||||||
auto * image_embed = load_image(ctx_llava, ¶ms, image);
|
|
||||||
if (!image_embed) {
|
|
||||||
LOG_ERR("%s: failed to load image %s. Terminating\n\n", __func__, image.c_str());
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
// process the prompt
|
|
||||||
process_prompt(ctx_llava, image_embed, ¶ms, params.prompt);
|
|
||||||
|
|
||||||
llama_perf_context_print(ctx_llava->ctx_llama);
|
|
||||||
llava_image_embed_free(image_embed);
|
|
||||||
ctx_llava->model = NULL;
|
|
||||||
llava_free(ctx_llava);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
llama_model_free(model);
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
@ -1,585 +0,0 @@
|
|||||||
#include "clip.h"
|
|
||||||
#include "llava.h"
|
|
||||||
|
|
||||||
#include "llama.h"
|
|
||||||
|
|
||||||
#include <algorithm>
|
|
||||||
#include <cerrno>
|
|
||||||
#include <cstdio>
|
|
||||||
#include <cstdlib>
|
|
||||||
#include <cstring>
|
|
||||||
#include <limits>
|
|
||||||
#include <vector>
|
|
||||||
#include <memory>
|
|
||||||
|
|
||||||
#if defined(LLAVA_LOG_OFF)
|
|
||||||
# define LOG_INF(...)
|
|
||||||
# define LOG_WRN(...)
|
|
||||||
# define LOG_ERR(...)
|
|
||||||
# define LOG_DBG(...)
|
|
||||||
#else // defined(LLAVA_LOG_OFF)
|
|
||||||
# define LOG_INF(...) do { fprintf(stdout, __VA_ARGS__); } while (0)
|
|
||||||
# define LOG_WRN(...) do { fprintf(stderr, __VA_ARGS__); } while (0)
|
|
||||||
# define LOG_ERR(...) do { fprintf(stderr, __VA_ARGS__); } while (0)
|
|
||||||
# define LOG_DBG(...) do { fprintf(stdout, __VA_ARGS__); } while (0)
|
|
||||||
#endif // defined(LLAVA_LOG_OFF)
|
|
||||||
|
|
||||||
// RGB uint8 image
|
|
||||||
struct clip_image_u8 {
|
|
||||||
int nx;
|
|
||||||
int ny;
|
|
||||||
|
|
||||||
std::vector<uint8_t> buf;
|
|
||||||
};
|
|
||||||
|
|
||||||
// RGB float32 image (NHWC)
|
|
||||||
// Memory layout: RGBRGBRGB...
|
|
||||||
struct clip_image_f32 {
|
|
||||||
int nx;
|
|
||||||
int ny;
|
|
||||||
|
|
||||||
std::vector<float> buf;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct clip_image_grid_shape {
|
|
||||||
int first;
|
|
||||||
int second;
|
|
||||||
};
|
|
||||||
|
|
||||||
// convenience cpp wrapper
|
|
||||||
struct clip_image_f32_batch_deleter {
|
|
||||||
void operator()(clip_image_f32_batch * val) { clip_image_f32_batch_free(val); }
|
|
||||||
};
|
|
||||||
typedef std::unique_ptr<clip_image_f32_batch, clip_image_f32_batch_deleter> clip_image_f32_batch_ptr;
|
|
||||||
|
|
||||||
struct clip_image_size_deleter {
|
|
||||||
void operator()(clip_image_f32_batch * val) { clip_image_f32_batch_free(val); }
|
|
||||||
};
|
|
||||||
typedef std::unique_ptr<clip_image_size, clip_image_size_deleter> clip_image_size_ptr;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Selects the best resolution from a list of possible resolutions based on the original size.
|
|
||||||
*
|
|
||||||
* @param original_size The original size of the image in the format (width, height).
|
|
||||||
* @param possible_resolutions A list of possible resolutions in the format [(width1, height1), (width2, height2), ...].
|
|
||||||
* @return The best fit resolution in the format (width, height).
|
|
||||||
*/
|
|
||||||
static std::pair<int, int> select_best_resolution(const std::pair<int, int>& original_size, const std::vector<std::pair<int, int>>& possible_resolutions) {
|
|
||||||
int original_width = original_size.first;
|
|
||||||
int original_height = original_size.second;
|
|
||||||
|
|
||||||
std::pair<int, int> best_fit;
|
|
||||||
int max_effective_resolution = 0;
|
|
||||||
int min_wasted_resolution = std::numeric_limits<int>::max();
|
|
||||||
|
|
||||||
for (const auto& resolution : possible_resolutions) {
|
|
||||||
int width = resolution.first;
|
|
||||||
int height = resolution.second;
|
|
||||||
float scale = std::min(static_cast<float>(width) / original_width, static_cast<float>(height) / original_height);
|
|
||||||
int downscaled_width = static_cast<int>(original_width * scale);
|
|
||||||
int downscaled_height = static_cast<int>(original_height * scale);
|
|
||||||
int effective_resolution = std::min(downscaled_width * downscaled_height, original_width * original_height);
|
|
||||||
int wasted_resolution = (width * height) - effective_resolution;
|
|
||||||
// LOG_DBG("resolution: %d %d, scale: %f, downscaled: %d %d, effective: %d, wasted: %d\n", width, height, scale, downscaled_width, downscaled_height, effective_resolution, wasted_resolution);
|
|
||||||
if (effective_resolution > max_effective_resolution || (effective_resolution == max_effective_resolution && wasted_resolution < min_wasted_resolution)) {
|
|
||||||
max_effective_resolution = effective_resolution;
|
|
||||||
min_wasted_resolution = wasted_resolution;
|
|
||||||
best_fit = resolution;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return best_fit;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Get the anyres image grid shape object
|
|
||||||
*
|
|
||||||
* @param image_size
|
|
||||||
* @param grid_pinpoints
|
|
||||||
* @param image_patch_size
|
|
||||||
* @return <int, int>
|
|
||||||
*/
|
|
||||||
static struct clip_image_grid_shape get_anyres_image_grid_shape(const std::pair<int, int> & image_size, const std::vector<std::pair<int, int>> & grid_pinpoints, int image_patch_size) {
|
|
||||||
/**
|
|
||||||
Conversion from gguf flat array to vector:
|
|
||||||
std::vector<std::pair<int, int>> possible_resolutions;
|
|
||||||
for (int i = 0; i < 32 && params.image_grid_pinpoints[i] != 0; i+=2) {
|
|
||||||
possible_resolutions.push_back({params.image_grid_pinpoints[i], params.image_grid_pinpoints[i+1]});
|
|
||||||
}
|
|
||||||
*/
|
|
||||||
auto best_resolution = select_best_resolution(image_size, grid_pinpoints);
|
|
||||||
return {best_resolution.first / image_patch_size, best_resolution.second / image_patch_size};
|
|
||||||
}
|
|
||||||
|
|
||||||
// Take the image segments in a grid configuration and return the embeddings and the number of embeddings into preallocated memory (image_embd_out)
|
|
||||||
static bool clip_llava_handle_patches(clip_ctx * ctx_clip, std::vector<float *> & image_embd_v, struct clip_image_grid_shape grid_shape, float * image_embd_out, int * n_img_pos_out) {
|
|
||||||
struct {
|
|
||||||
struct ggml_context * ctx;
|
|
||||||
} model;
|
|
||||||
|
|
||||||
const int32_t image_size = clip_get_image_size(ctx_clip);
|
|
||||||
const int32_t patch_size = clip_get_patch_size(ctx_clip);
|
|
||||||
|
|
||||||
int32_t num_patches_per_side = image_size / patch_size; // 336 / 14 = 24 - used for embedding-patching boxes (24*24 = 576 patches)
|
|
||||||
|
|
||||||
int num_patches_width = grid_shape.first; // grid 1-4
|
|
||||||
int num_patches_height = grid_shape.second; // grid 1-4
|
|
||||||
|
|
||||||
const size_t num_images = num_patches_width * num_patches_height + 1;
|
|
||||||
|
|
||||||
// TODO: size calculation is not calculated - it's only tens of MB
|
|
||||||
size_t ctx_size = 0;
|
|
||||||
|
|
||||||
{
|
|
||||||
ctx_size += clip_embd_nbytes(ctx_clip) * num_images * 8; // image_features
|
|
||||||
ctx_size += 1024*1024 * ggml_type_size(GGML_TYPE_F32);
|
|
||||||
}
|
|
||||||
|
|
||||||
struct ggml_init_params params {
|
|
||||||
/*.mem_size =*/ ctx_size,
|
|
||||||
/*.mem_buffer =*/ NULL,
|
|
||||||
/*.no_alloc =*/ false, // NOTE: this should be false when using the legacy API
|
|
||||||
};
|
|
||||||
|
|
||||||
// Python reference code for full unpad:
|
|
||||||
/*
|
|
||||||
base_image_feature = image_feature[0]
|
|
||||||
image_feature = image_feature[1:]
|
|
||||||
image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous()
|
|
||||||
image_feature = image_feature.flatten(1, 2).flatten(2, 3)
|
|
||||||
image_feature = unpad_image(image_feature, image_sizes[image_idx])
|
|
||||||
image_feature = torch.cat((
|
|
||||||
image_feature,
|
|
||||||
self.model.image_newline[:, None, None].expand(*image_feature.shape[:-1], 1)
|
|
||||||
), dim=-1)
|
|
||||||
image_feature = image_feature.flatten(1, 2).transpose(0, 1)
|
|
||||||
image_feature = torch.cat((base_image_feature, image_feature), dim=0)
|
|
||||||
*/
|
|
||||||
// We now have two options: unpad or no unpad. Unpad removes tokens for faster llm eval.
|
|
||||||
// In terms of result quality it appears to make no difference, so we'll start with the easier approach given 5D tensors are not supported in ggml yet.
|
|
||||||
// Without unpad we have to split the sub-image embeddings into patches of 24 features each and permute them.
|
|
||||||
    // Once all images are processed, the base_image_features are prepended without any changes.
|
|
||||||
|
|
||||||
// Pytorch reference simplified, modified for ggml compatibility - confirmed identical output in python (for a 2x2 grid image (676x676 scaling))
|
|
||||||
/*
|
|
||||||
image_feature = image_feature.view(2, 2, 24, 24, 4096)
|
|
||||||
image_feature = image_feature.permute(0, 2, 1, 3, 4).contiguous()
|
|
||||||
image_feature = image_feature.view(2, 24, 2, 24, 4096)
|
|
||||||
image_feature = image_feature.flatten(0, 3)
|
|
||||||
|
|
||||||
// Reshape to 4D tensor by merging the last two dimensions
|
|
||||||
image_feature = image_feature.view(2, 2, 24, 24*4096)
|
|
||||||
image_feature = image_feature.permute(0, 2, 1, 3).contiguous()
|
|
||||||
image_feature = image_feature.view(-1, 4096)
|
|
||||||
*/
|
|
||||||
|
|
||||||
model.ctx = ggml_init(params);
|
|
||||||
|
|
||||||
struct ggml_tensor * image_features = ggml_new_tensor_3d(model.ctx, GGML_TYPE_F32, clip_n_mmproj_embd(ctx_clip), clip_n_patches(ctx_clip), num_images - 1); // example: 4096 x 576 x 4
|
|
||||||
// ggml_tensor_printf(image_features,"image_features",__LINE__,false,false);
|
|
||||||
// fill it with the image embeddings, ignoring the base
|
|
||||||
for (size_t i = 1; i < num_images; i++) {
|
|
||||||
size_t offset = (i-1) * clip_embd_nbytes(ctx_clip);
|
|
||||||
memcpy((uint8_t *)(image_features->data) + offset, image_embd_v[i], clip_embd_nbytes(ctx_clip));
|
|
||||||
}
|
|
||||||
|
|
||||||
struct ggml_cgraph * gf = ggml_new_graph(model.ctx);
|
|
||||||
size_t size_ele = ggml_type_size(GGML_TYPE_F32);
|
|
||||||
|
|
||||||
struct ggml_tensor *image_features_patchview = ggml_view_4d(model.ctx, image_features,
|
|
||||||
num_patches_per_side * clip_n_mmproj_embd(ctx_clip),
|
|
||||||
num_patches_per_side,
|
|
||||||
num_patches_width,
|
|
||||||
num_patches_height,
|
|
||||||
size_ele * num_patches_per_side * clip_n_mmproj_embd(ctx_clip),
|
|
||||||
size_ele * num_patches_per_side * clip_n_mmproj_embd(ctx_clip) * num_patches_per_side,
|
|
||||||
size_ele * num_patches_per_side * clip_n_mmproj_embd(ctx_clip) * num_patches_per_side * num_patches_width, 0);
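    // Worked numbers for the 2x2 grid / 24x24 patches / 4096-dim example above (F32, so
    // size_ele == 4): the view has ne = [24*4096, 24, 2, 2] with byte strides
    // nb1 = 4*24*4096 = 393216, nb2 = nb1*24 = 9437184 and nb3 = nb2*2 = 18874368; the
    // permute below then swaps the patch-row axis with the grid-column axis, mirroring
    // permute(0, 2, 1, 3) in the PyTorch reference snippet.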
|
|
||||||
// ggml_tensor_printf(image_features_patchview,"image_features_patchview",__LINE__,false,false);
|
|
||||||
struct ggml_tensor *permuted_cont = ggml_cont(model.ctx, ggml_permute(model.ctx, image_features_patchview, 0, 2, 1, 3));
|
|
||||||
/**
|
|
||||||
At the end of each row we have to add the row_end embeddings, which are the same as the newline embeddings
|
|
||||||
image_feature = torch.cat((
|
|
||||||
image_feature,
|
|
||||||
self.model.image_newline[:, None, None].expand(*image_feature.shape[:-1], 1).to(image_feature.device)
|
|
||||||
), dim=-1)
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
|
|
||||||
// ggml_tensor_printf(permuted_cont,"permuted_cont",__LINE__,false,false);
|
|
||||||
struct ggml_tensor *flatten = ggml_view_2d(model.ctx, permuted_cont, clip_n_mmproj_embd(ctx_clip), num_patches_height * num_patches_width * num_patches_per_side * num_patches_per_side, size_ele * clip_n_mmproj_embd(ctx_clip), 0);
|
|
||||||
// ggml_tensor_printf(flatten,"flatten",__LINE__,false,false);
|
|
||||||
ggml_build_forward_expand(gf, flatten);
|
|
||||||
ggml_graph_compute_with_ctx(model.ctx, gf, 1);
|
|
||||||
struct ggml_tensor* result = ggml_graph_node(gf, -1);
|
|
||||||
|
|
||||||
memcpy(image_embd_out, image_embd_v[0], clip_embd_nbytes(ctx_clip)); // main image as global context
|
|
||||||
    // append without newline tokens (default behavior in llava_arch when not using unpad):
|
|
||||||
memcpy(image_embd_out + clip_n_patches(ctx_clip) * clip_n_mmproj_embd(ctx_clip), (float*)result->data, clip_embd_nbytes(ctx_clip) * (num_images-1)); // grid patches
|
|
||||||
*n_img_pos_out = static_cast<int>(result->ne[1]+clip_n_patches(ctx_clip));
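    // i.e. the output layout is [base-image tokens][grid-patch tokens, row by row]; with the
    // 336px/14px case that is 576 base tokens followed by 576 tokens per grid patch, and
    // n_img_pos_out counts both parts.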
|
|
||||||
|
|
||||||
// Debug: Test single segments
|
|
||||||
    // Current findings: sending the base image or a single segment embedding works the same as in python
|
|
||||||
// However, permuted embeddings do not work yet (stride issue?)
|
|
||||||
// memcpy(image_embd_out, image_embd_v[0], clip_embd_nbytes(ctx_clip)); // main image as context
|
|
||||||
// memcpy(image_embd_out, (float*)prepared_cont->data, clip_embd_nbytes(ctx_clip)); // main image as context
|
|
||||||
// *n_img_pos_out=576;
|
|
||||||
|
|
||||||
ggml_free(model.ctx);
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
static clip_image_f32 * reshape_by_patch(clip_image_f32 * image, int patch_size) {
|
|
||||||
int width = image->nx;
|
|
||||||
int height = image->ny;
|
|
||||||
int num_patches = (height / patch_size) * (width / patch_size);
|
|
||||||
clip_image_f32 * patch = clip_image_f32_init();
|
|
||||||
patch->nx = patch_size * num_patches;
|
|
||||||
patch->ny = patch_size;
|
|
||||||
patch->buf.resize(3 * patch->nx * patch->ny);
|
|
||||||
|
|
||||||
int patch_index = 0;
|
|
||||||
|
|
||||||
for (int i = 0; i < height; i += patch_size) {
|
|
||||||
for (int j = 0; j < width; j += patch_size) {
|
|
||||||
for (int pi = 0; pi < patch_size; ++pi) {
|
|
||||||
for (int pj = 0; pj < patch_size; ++pj) {
|
|
||||||
int input_index = ((i + pi) * width + (j + pj)) * 3;
|
|
||||||
int output_index = (pi * patch_size * num_patches + patch_index * patch_size + pj) * 3;
|
|
||||||
patch->buf[output_index] = image->buf[input_index];
|
|
||||||
patch->buf[output_index+1] = image->buf[input_index+1];
|
|
||||||
patch->buf[output_index+2] = image->buf[input_index+2];
|
|
||||||
}
|
|
||||||
}
|
|
||||||
patch_index++;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return patch;
|
|
||||||
}
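// Worked example: a 28x28 input with patch_size 14 gives num_patches = 4 and an output
// of 56x14 - the four 14x14 patches are laid out side by side along x, in the row-major
// order in which they are visited above.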
|
|
||||||
|
|
||||||
static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float * image_embd, int * n_img_pos) {
|
|
||||||
// std::vector<clip_image_f32*> img_res_v; // format VectN x H x W x RGB (N x 336 x 336 x 3), so interleaved RGB - different to the python implementation which is N x 3 x 336 x 336
|
|
||||||
clip_image_f32_batch_ptr img_res_v(clip_image_f32_batch_init());
|
|
||||||
if (!clip_image_preprocess(ctx_clip, img, img_res_v.get())) {
|
|
||||||
LOG_ERR("%s: unable to preprocess image\n", __func__);
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
const int64_t t_img_enc_start_us = ggml_time_us();
|
|
||||||
|
|
||||||
const char * mm_patch_merge_type = clip_patch_merge_type(ctx_clip);
|
|
||||||
|
|
||||||
const size_t n_imgs = clip_image_f32_batch_n_images(img_res_v.get());
|
|
||||||
|
|
||||||
if (clip_is_minicpmv(ctx_clip) || clip_is_qwen2vl(ctx_clip)) {
|
|
||||||
std::vector<float *> image_embd_v;
|
|
||||||
image_embd_v.resize(n_imgs);
|
|
||||||
clip_image_size load_image_size;
|
|
||||||
|
|
||||||
for (size_t i = 0; i < n_imgs; i++) {
|
|
||||||
const int64_t t_img_enc_step_start_us = ggml_time_us();
|
|
||||||
int nx = clip_image_f32_batch_nx(img_res_v.get(), i);
|
|
||||||
int ny = clip_image_f32_batch_ny(img_res_v.get(), i);
|
|
||||||
image_embd_v[i] = (float *)malloc(clip_embd_nbytes_by_img(ctx_clip, nx, ny));
|
|
||||||
int patch_size = 14;
|
|
||||||
load_image_size.width = nx;
|
|
||||||
load_image_size.height = ny;
|
|
||||||
clip_add_load_image_size(ctx_clip, &load_image_size);
|
|
||||||
|
|
||||||
bool encoded = false;
|
|
||||||
clip_image_f32 * img_res = clip_image_f32_get_img(img_res_v.get(), i);
|
|
||||||
if (clip_is_qwen2vl(ctx_clip)) {
|
|
||||||
encoded = clip_image_encode(ctx_clip, n_threads, img_res, image_embd_v[i]);
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
encoded = clip_image_encode(ctx_clip, n_threads, reshape_by_patch(img_res, patch_size), image_embd_v[i]);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!encoded) {
|
|
||||||
LOG_ERR("Unable to encode image - spatial_unpad - subimage %d of %d\n", (int) i+1, (int) n_imgs);
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
            const int64_t t_img_enc_step_batch_us = ggml_time_us();
|
|
||||||
LOG_INF("%s: step %d of %d encoded in %8.2f ms\n", __func__, (int)i+1, (int)n_imgs, (t_img_enc_steop_batch_us - t_img_enc_step_start_us) / 1000.0);
|
|
||||||
}
|
|
||||||
const int64_t t_img_enc_batch_us = ggml_time_us();
|
|
||||||
LOG_INF("%s: all %d segments encoded in %8.2f ms\n", __func__, (int)n_imgs, (t_img_enc_batch_us - t_img_enc_start_us) / 1000.0);
|
|
||||||
|
|
||||||
int n_img_pos_out = 0;
|
|
||||||
for (size_t i = 0; i < image_embd_v.size(); i++) {
|
|
||||||
int nx = clip_image_f32_batch_nx(img_res_v.get(), i);
|
|
||||||
int ny = clip_image_f32_batch_ny(img_res_v.get(), i);
|
|
||||||
clip_image_f32 * img_res = clip_image_f32_get_img(img_res_v.get(), i);
|
|
||||||
std::memcpy(
|
|
||||||
image_embd + n_img_pos_out * clip_n_mmproj_embd(ctx_clip),
|
|
||||||
image_embd_v[i],
|
|
||||||
clip_embd_nbytes_by_img(ctx_clip, nx, ny));
|
|
||||||
n_img_pos_out += clip_n_patches_by_img(ctx_clip, img_res);
|
|
||||||
}
|
|
||||||
*n_img_pos = n_img_pos_out;
|
|
||||||
for (size_t i = 0; i < image_embd_v.size(); i++) {
|
|
||||||
free(image_embd_v[i]);
|
|
||||||
}
|
|
||||||
image_embd_v.clear();
|
|
||||||
load_image_size.width = img->nx;
|
|
||||||
load_image_size.height = img->ny;
|
|
||||||
clip_add_load_image_size(ctx_clip, &load_image_size);
|
|
||||||
LOG_INF("%s: load_image_size %d %d\n", __func__, load_image_size.width, load_image_size.height);
|
|
||||||
}
|
|
||||||
else if (clip_is_glm(ctx_clip)){
|
|
||||||
struct clip_image_size * load_image_size = clip_image_size_init();
|
|
||||||
load_image_size->width = clip_image_f32_batch_nx(img_res_v.get(), 0);
|
|
||||||
load_image_size->height = clip_image_f32_batch_ny(img_res_v.get(), 0);
|
|
||||||
clip_add_load_image_size(ctx_clip, load_image_size);
|
|
||||||
|
|
||||||
clip_image_f32 * img_res = clip_image_f32_get_img(img_res_v.get(), 0);
|
|
||||||
bool encoded = clip_image_encode(ctx_clip, n_threads, img_res, image_embd);
|
|
||||||
int pos = int(load_image_size->width/clip_get_patch_size(ctx_clip)/2);
|
|
||||||
*n_img_pos = (pos * pos + 2);
|
|
||||||
if (!encoded){
|
|
||||||
LOG_ERR("Unable to encode image \n");
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
else if (strcmp(mm_patch_merge_type, "spatial_unpad") != 0) {
|
|
||||||
// flat / default llava-1.5 type embedding
|
|
||||||
*n_img_pos = clip_n_patches(ctx_clip);
|
|
||||||
clip_image_f32 * img_res = clip_image_f32_get_img(img_res_v.get(), 0);
|
|
||||||
bool encoded = clip_image_encode(ctx_clip, n_threads, img_res, image_embd); // image_embd shape is 576 x 4096
|
|
||||||
if (!encoded) {
|
|
||||||
LOG_ERR("Unable to encode image\n");
|
|
||||||
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
// spatial_unpad llava-1.6 type embedding
|
|
||||||
// TODO: CLIP needs batching support - in HF the llm projection is separate after encoding, which might be a solution to quickly get batching working
|
|
||||||
std::vector<float *> image_embd_v;
|
|
||||||
image_embd_v.resize(n_imgs);
|
|
||||||
for (size_t i = 0; i < n_imgs; i++) {
|
|
||||||
clip_image_f32 * img_res = clip_image_f32_get_img(img_res_v.get(), i);
|
|
||||||
image_embd_v[i] = (float *)malloc(clip_embd_nbytes(ctx_clip)); // 576 patches * 4096 embeddings * 4 bytes = 9437184
|
|
||||||
const bool encoded = clip_image_encode(ctx_clip, n_threads, img_res, image_embd_v[i]); // image data is in 3x336x336 format and will be converted to 336x336x3 inside
|
|
||||||
if (!encoded) {
|
|
||||||
LOG_ERR("Unable to encode image - spatial_unpad - subimage %d of %d\n", (int) i+1, (int) n_imgs);
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
const int64_t t_img_enc_batch_us = ggml_time_us();
|
|
||||||
LOG_INF("%s: %d segments encoded in %8.2f ms\n", __func__, (int)n_imgs, (t_img_enc_batch_us - t_img_enc_start_us) / 1000.0);
|
|
||||||
|
|
||||||
const int32_t * image_grid = clip_image_grid(ctx_clip);
|
|
||||||
const size_t num_gridpoints = get_clip_image_grid_size(ctx_clip);
|
|
||||||
|
|
||||||
std::vector<std::pair<int, int>> grid_pinpoints;
|
|
||||||
for (size_t i = 0; i < num_gridpoints; i += 2) {
|
|
||||||
grid_pinpoints.push_back({image_grid[i], image_grid[i+1]});
|
|
||||||
}
|
|
||||||
|
|
||||||
const int32_t image_size = clip_get_image_size(ctx_clip);
|
|
||||||
|
|
||||||
struct clip_image_grid_shape grid_shape = get_anyres_image_grid_shape({img->nx,img->ny}, grid_pinpoints, image_size);
|
|
||||||
|
|
||||||
int n_img_pos_out;
|
|
||||||
clip_llava_handle_patches(ctx_clip, image_embd_v, grid_shape, image_embd, &n_img_pos_out);
|
|
||||||
*n_img_pos = n_img_pos_out;
|
|
||||||
|
|
||||||
for (size_t i = 0; i < image_embd_v.size(); i++) {
|
|
||||||
free(image_embd_v[i]);
|
|
||||||
}
|
|
||||||
image_embd_v.clear();
|
|
||||||
|
|
||||||
// debug image/segment/normalization content:
|
|
||||||
// clip_image_u8 * tmp = clip_image_u8_init();
|
|
||||||
// clip_image_convert_f32_to_u8(*image_feature, *tmp);
|
|
||||||
// clip_image_save_to_bmp(*tmp, "image_feature.bmp");
|
|
||||||
}
|
|
||||||
|
|
||||||
LOG_INF("%s: image embedding created: %d tokens\n", __func__, *n_img_pos);
|
|
||||||
|
|
||||||
const int64_t t_img_enc_end_us = ggml_time_us();
|
|
||||||
float t_img_enc_ms = (t_img_enc_end_us - t_img_enc_start_us) / 1000.0;
|
|
||||||
|
|
||||||
LOG_INF("\n%s: image encoded in %8.2f ms by CLIP (%8.2f ms per image patch)\n", __func__, t_img_enc_ms, t_img_enc_ms / *n_img_pos);
|
|
||||||
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool llava_validate_embed_size(const llama_context * ctx_llama, const clip_ctx * ctx_clip) {
|
|
||||||
// make sure that the correct mmproj was used, i.e., compare apples to apples
|
|
||||||
int n_llama_embd = llama_model_n_embd(llama_get_model(ctx_llama));
|
|
||||||
auto n_image_embd = clip_n_mmproj_embd(ctx_clip);
|
|
||||||
if (n_image_embd != n_llama_embd) {
|
|
||||||
LOG_ERR("%s: embedding dim of the multimodal projector (%d) is not equal to that of LLaMA (%d). Make sure that you use the correct mmproj file.\n", __func__, n_image_embd, n_llama_embd);
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool llava_image_embed_make_with_clip_img(clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_img_pos_out) {
|
|
||||||
// Granite vision uses up to 10 patches + base patch
|
|
||||||
int num_max_patches = 11;
|
|
||||||
if (clip_is_minicpmv(ctx_clip)) {
|
|
||||||
num_max_patches = 10;
|
|
||||||
}
|
|
||||||
if (clip_is_glm(ctx_clip)) {
|
|
||||||
num_max_patches = 1;
|
|
||||||
}
|
|
||||||
float * image_embd;
|
|
||||||
if (clip_is_qwen2vl(ctx_clip)) {
|
|
||||||
        // qwen2vl doesn't split the image into chunks, so `num_max_patches` is not needed.
|
|
||||||
image_embd = (float *)malloc(clip_embd_nbytes_by_img(ctx_clip, img->nx, img->ny));
|
|
||||||
} else {
|
|
||||||
image_embd = (float *)malloc(clip_embd_nbytes(ctx_clip)*num_max_patches); // TODO: base on gridsize/llava model
|
|
||||||
}
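    // Rough sizing for the non-qwen2vl branch, using the llava-1.5 numbers quoted elsewhere
    // in this file (576 patches x 4096 dims x 4 bytes = 9437184 bytes per patch set):
    // 11 * 9437184 bytes is roughly 99 MiB.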
|
|
||||||
if (!image_embd) {
|
|
||||||
LOG_ERR("Unable to allocate memory for image embeddings\n");
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
int n_img_pos;
|
|
||||||
if (!encode_image_with_clip(ctx_clip, n_threads, img, image_embd, &n_img_pos)) {
|
|
||||||
LOG_ERR("%s: cannot encode image, aborting\n", __func__);
|
|
||||||
free(image_embd);
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
*image_embd_out = image_embd;
|
|
||||||
*n_img_pos_out = n_img_pos;
|
|
||||||
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
struct llava_embd_batch {
|
|
||||||
std::vector<llama_pos> pos;
|
|
||||||
std::vector<int32_t> n_seq_id;
|
|
||||||
std::vector<llama_seq_id> seq_id_0;
|
|
||||||
std::vector<llama_seq_id *> seq_ids;
|
|
||||||
std::vector<int8_t> logits;
|
|
||||||
llama_batch batch;
|
|
||||||
llava_embd_batch(float * embd, int32_t n_tokens, llama_pos pos_0, llama_seq_id seq_id) {
|
|
||||||
pos .resize(n_tokens);
|
|
||||||
n_seq_id.resize(n_tokens);
|
|
||||||
seq_ids .resize(n_tokens + 1);
|
|
||||||
logits .resize(n_tokens);
|
|
||||||
seq_id_0.resize(1);
|
|
||||||
seq_id_0[0] = seq_id;
|
|
||||||
seq_ids [n_tokens] = nullptr;
|
|
||||||
batch = {
|
|
||||||
/*n_tokens =*/ n_tokens,
|
|
||||||
/*tokens =*/ nullptr,
|
|
||||||
/*embd =*/ embd,
|
|
||||||
/*pos =*/ pos.data(),
|
|
||||||
/*n_seq_id =*/ n_seq_id.data(),
|
|
||||||
/*seq_id =*/ seq_ids.data(),
|
|
||||||
/*logits =*/ logits.data(),
|
|
||||||
};
|
|
||||||
for (int i = 0; i < n_tokens; i++) {
|
|
||||||
batch.pos [i] = pos_0 + i;
|
|
||||||
batch.n_seq_id[i] = 1;
|
|
||||||
batch.seq_id [i] = seq_id_0.data();
|
|
||||||
batch.logits [i] = false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
bool llava_eval_image_embed(llama_context * ctx_llama, const struct llava_image_embed * image_embed, int n_batch, int * n_past) {
|
|
||||||
int n_embd = llama_model_n_embd(llama_get_model(ctx_llama));
|
|
||||||
|
|
||||||
for (int i = 0; i < image_embed->n_image_pos; i += n_batch) {
|
|
||||||
int n_eval = image_embed->n_image_pos - i;
|
|
||||||
if (n_eval > n_batch) {
|
|
||||||
n_eval = n_batch;
|
|
||||||
}
|
|
||||||
float * embd = image_embed->embed+i*n_embd;
|
|
||||||
llava_embd_batch llava_batch = llava_embd_batch(embd, n_eval, *n_past, 0);
|
|
||||||
if (llama_decode(ctx_llama, llava_batch.batch)) {
|
|
||||||
LOG_ERR("%s : failed to eval\n", __func__);
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
*n_past += n_eval;
|
|
||||||
}
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
struct llava_image_embed * llava_image_embed_make_with_bytes(struct clip_ctx * ctx_clip, int n_threads, const unsigned char * image_bytes, int image_bytes_length) {
|
|
||||||
clip_image_u8 * img = clip_image_u8_init();
|
|
||||||
if (!clip_image_load_from_bytes(image_bytes, image_bytes_length, img)) {
|
|
||||||
clip_image_u8_free(img);
|
|
||||||
LOG_ERR("%s: can't load image from bytes, is it a valid image?", __func__);
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
float* image_embed = NULL;
|
|
||||||
int n_image_pos = 0;
|
|
||||||
bool image_embed_result = llava_image_embed_make_with_clip_img(ctx_clip, n_threads, img, &image_embed, &n_image_pos);
|
|
||||||
if (!image_embed_result) {
|
|
||||||
clip_image_u8_free(img);
|
|
||||||
LOG_ERR("%s: couldn't embed the image\n", __func__);
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
clip_image_u8_free(img);
|
|
||||||
auto result = (llava_image_embed*)malloc(sizeof(llava_image_embed));
|
|
||||||
result->embed = image_embed;
|
|
||||||
result->n_image_pos = n_image_pos;
|
|
||||||
return result;
|
|
||||||
}
|
|
||||||
|
|
||||||
static bool load_file_to_bytes(const char* path, unsigned char** bytesOut, long *sizeOut) {
|
|
||||||
auto file = fopen(path, "rb");
|
|
||||||
if (file == NULL) {
|
|
||||||
LOG_ERR("%s: can't read file %s\n", __func__, path);
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
fseek(file, 0, SEEK_END);
|
|
||||||
auto fileSize = ftell(file);
|
|
||||||
fseek(file, 0, SEEK_SET);
|
|
||||||
|
|
||||||
auto buffer = (unsigned char *)malloc(fileSize); // Allocate memory to hold the file data
|
|
||||||
if (buffer == NULL) {
|
|
||||||
LOG_ERR("%s: failed to alloc %ld bytes for file %s\n", __func__, fileSize, path);
|
|
||||||
perror("Memory allocation error");
|
|
||||||
fclose(file);
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
errno = 0;
|
|
||||||
size_t ret = fread(buffer, 1, fileSize, file); // Read the file into the buffer
|
|
||||||
if (ferror(file)) {
|
|
||||||
LOG_ERR("read error: %s", strerror(errno));
|
|
||||||
free(buffer);
|
|
||||||
fclose(file);
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
if (ret != (size_t) fileSize) {
|
|
||||||
LOG_ERR("unexpectedly reached end of file");
|
|
||||||
free(buffer);
|
|
||||||
fclose(file);
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
fclose(file); // Close the file
|
|
||||||
|
|
||||||
*bytesOut = buffer;
|
|
||||||
*sizeOut = fileSize;
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
struct llava_image_embed * llava_image_embed_make_with_filename(struct clip_ctx * ctx_clip, int n_threads, const char * image_path) {
|
|
||||||
unsigned char* image_bytes;
|
|
||||||
long image_bytes_length;
|
|
||||||
auto loaded = load_file_to_bytes(image_path, &image_bytes, &image_bytes_length);
|
|
||||||
if (!loaded) {
|
|
||||||
LOG_ERR("%s: failed to load %s\n", __func__, image_path);
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
llava_image_embed *embed = llava_image_embed_make_with_bytes(ctx_clip, n_threads, image_bytes, image_bytes_length);
|
|
||||||
free(image_bytes);
|
|
||||||
|
|
||||||
return embed;
|
|
||||||
}
|
|
||||||
|
|
||||||
void llava_image_embed_free(struct llava_image_embed * embed) {
|
|
||||||
free(embed->embed);
|
|
||||||
free(embed);
|
|
||||||
}
|
|
@ -1,49 +0,0 @@
|
|||||||
#ifndef LLAVA_H
|
|
||||||
#define LLAVA_H
|
|
||||||
|
|
||||||
#include "ggml.h"
|
|
||||||
|
|
||||||
#ifdef LLAMA_SHARED
|
|
||||||
# if defined(_WIN32) && !defined(__MINGW32__)
|
|
||||||
# ifdef LLAMA_BUILD
|
|
||||||
# define LLAVA_API __declspec(dllexport)
|
|
||||||
# else
|
|
||||||
# define LLAVA_API __declspec(dllimport)
|
|
||||||
# endif
|
|
||||||
# else
|
|
||||||
# define LLAVA_API __attribute__ ((visibility ("default")))
|
|
||||||
# endif
|
|
||||||
#else
|
|
||||||
# define LLAVA_API
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#ifdef __cplusplus
|
|
||||||
extern "C" {
|
|
||||||
#endif
|
|
||||||
|
|
||||||
struct clip_ctx;
|
|
||||||
struct llava_image_embed {
|
|
||||||
float * embed;
|
|
||||||
int n_image_pos;
|
|
||||||
};
|
|
||||||
|
|
||||||
/** sanity check for clip <-> llava embed size match */
|
|
||||||
LLAVA_API bool llava_validate_embed_size(const struct llama_context * ctx_llama, const struct clip_ctx * ctx_clip);
|
|
||||||
|
|
||||||
LLAVA_API bool llava_image_embed_make_with_clip_img(struct clip_ctx * ctx_clip, int n_threads, const struct clip_image_u8 * img, float ** image_embd_out, int * n_img_pos_out);
|
|
||||||
|
|
||||||
/** build an image embed from image file bytes */
|
|
||||||
LLAVA_API struct llava_image_embed * llava_image_embed_make_with_bytes(struct clip_ctx * ctx_clip, int n_threads, const unsigned char * image_bytes, int image_bytes_length);
|
|
||||||
/** build an image embed from a path to an image filename */
|
|
||||||
LLAVA_API struct llava_image_embed * llava_image_embed_make_with_filename(struct clip_ctx * ctx_clip, int n_threads, const char * image_path);
|
|
||||||
/** free an embedding made with llava_image_embed_make_* */
|
|
||||||
LLAVA_API void llava_image_embed_free(struct llava_image_embed * embed);
|
|
||||||
|
|
||||||
/** write the image represented by embed into the llama context with batch size n_batch, starting at context pos n_past. on completion, n_past points to the next position in the context after the image embed. */
|
|
||||||
LLAVA_API bool llava_eval_image_embed(struct llama_context * ctx_llama, const struct llava_image_embed * embed, int n_batch, int * n_past);
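/**
 * A minimal usage sketch (hypothetical path, thread count and batch size; ctx_clip and
 * ctx_llama are assumed to be initialized elsewhere, e.g. via clip_init() and
 * llama_init_from_model()):
 *
 *     struct llava_image_embed * embed =
 *         llava_image_embed_make_with_filename(ctx_clip, 4, "image.jpg");
 *     if (embed != NULL) {
 *         int n_past = 0;
 *         if (llava_validate_embed_size(ctx_llama, ctx_clip)) {
 *             llava_eval_image_embed(ctx_llama, embed, 512, &n_past);
 *         }
 *         llava_image_embed_free(embed);
 *     }
 */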
|
|
||||||
|
|
||||||
#ifdef __cplusplus
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#endif
|
|
@ -1,354 +0,0 @@
|
|||||||
#include "arg.h"
|
|
||||||
#include "log.h"
|
|
||||||
#include "common.h"
|
|
||||||
#include "sampling.h"
|
|
||||||
#include "clip.h"
|
|
||||||
#include "llava.h"
|
|
||||||
#include "llama.h"
|
|
||||||
#include "ggml.h"
|
|
||||||
|
|
||||||
#include <algorithm>
|
|
||||||
#include <cstdio>
|
|
||||||
#include <cstdlib>
|
|
||||||
#include <cstring>
|
|
||||||
#include <vector>
|
|
||||||
#include <iostream> // TODO: remove me
|
|
||||||
|
|
||||||
struct llava_context {
|
|
||||||
struct clip_ctx * ctx_clip = NULL;
|
|
||||||
struct llama_context * ctx_llama = NULL;
|
|
||||||
struct llama_model * model = NULL;
|
|
||||||
};
|
|
||||||
|
|
||||||
static void show_additional_info(int /*argc*/, char ** argv) {
|
|
||||||
LOG("\nexample usage:\n\n%s -m <llava-v1.5-7b/ggml-model-q5_k.gguf> --mmproj <llava-v1.5-7b/mmproj-model-f16.gguf> --image <path/to/an/image.jpg> --image <path/to/another/image.jpg> [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]);
|
|
||||||
LOG("\nnote: a lower temperature value like 0.1 is recommended for better quality.\n");
|
|
||||||
}
|
|
||||||
|
|
||||||
static struct llama_model * llava_init(common_params * params) {
|
|
||||||
llama_backend_init();
|
|
||||||
llama_numa_init(params->numa);
|
|
||||||
|
|
||||||
llama_model_params model_params = common_model_params_to_llama(*params);
|
|
||||||
|
|
||||||
llama_model * model = llama_model_load_from_file(params->model.path.c_str(), model_params);
|
|
||||||
if (model == NULL) {
|
|
||||||
LOG_ERR("%s: unable to load model\n" , __func__);
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
return model;
|
|
||||||
}
|
|
||||||
|
|
||||||
static struct llava_context * llava_init_context(common_params * params, llama_model * model) {
|
|
||||||
auto prompt = params->prompt;
|
|
||||||
if (prompt.empty()) {
|
|
||||||
prompt = "describe the image in detail.";
|
|
||||||
}
|
|
||||||
|
|
||||||
llama_context_params ctx_params = common_context_params_to_llama(*params);
|
|
||||||
if (params->n_ctx < 2048) {
|
|
||||||
// warn user here, "Image processing requires at least 2048 context, setting context to 2048"
|
|
||||||
LOG_WRN("%s: Image processing requires at least 2048 context, setting context to 2048\n" , __func__);
|
|
||||||
ctx_params.n_ctx = 2048;
|
|
||||||
} else {
|
|
||||||
ctx_params.n_ctx = params->n_ctx;
|
|
||||||
}
|
|
||||||
|
|
||||||
llama_context * ctx_llama = llama_init_from_model(model, ctx_params);
|
|
||||||
|
|
||||||
if (ctx_llama == NULL) {
|
|
||||||
LOG_ERR("%s: failed to create the llama_context\n" , __func__);
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
auto * ctx_llava = (struct llava_context *)malloc(sizeof(llava_context));
|
|
||||||
|
|
||||||
ctx_llava->ctx_llama = ctx_llama;
|
|
||||||
ctx_llava->model = model;
|
|
||||||
return ctx_llava;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void llava_free(struct llava_context * ctx_llava) {
|
|
||||||
if (ctx_llava->ctx_clip) {
|
|
||||||
clip_free(ctx_llava->ctx_clip);
|
|
||||||
ctx_llava->ctx_clip = NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
llama_free(ctx_llava->ctx_llama);
|
|
||||||
llama_model_free(ctx_llava->model);
|
|
||||||
llama_backend_free();
|
|
||||||
}
|
|
||||||
|
|
||||||
static struct clip_ctx * clip_init_context(common_params * params) {
|
|
||||||
const char * clip_path = params->mmproj.path.c_str();
|
|
||||||
|
|
||||||
auto prompt = params->prompt;
|
|
||||||
if (prompt.empty()) {
|
|
||||||
prompt = "describe the image in detail.";
|
|
||||||
}
|
|
||||||
struct clip_context_params clip_params = {
|
|
||||||
/* use_gpu */ params->n_gpu_layers != 0,
|
|
||||||
/* verbosity */ GGML_LOG_LEVEL_INFO, // TODO: make this configurable
|
|
||||||
};
|
|
||||||
auto * ctx_clip = clip_init(clip_path, clip_params);
|
|
||||||
return ctx_clip;
|
|
||||||
}
|
|
||||||
|
|
||||||
static bool eval_tokens(struct llama_context * ctx_llama, std::vector<llama_token> tokens, int n_batch, int * n_past) {
|
|
||||||
int N = (int) tokens.size();
|
|
||||||
for (int i = 0; i < N; i += n_batch) {
|
|
||||||
int n_eval = (int) tokens.size() - i;
|
|
||||||
if (n_eval > n_batch) {
|
|
||||||
n_eval = n_batch;
|
|
||||||
}
|
|
||||||
if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval))) {
|
|
||||||
LOG_ERR("%s : failed to eval. token %d/%d (batch size %d, n_past %d)\n", __func__, i, N, n_batch, *n_past);
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
*n_past += n_eval;
|
|
||||||
}
|
|
||||||
return true;
|
|
||||||
}
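// e.g. with tokens.size() == 5000 and n_batch == 2048 this issues three llama_decode()
// calls of 2048, 2048 and 904 tokens respectively.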
|
|
||||||
|
|
||||||
static bool eval_id(struct llama_context * ctx_llama, int id, int * n_past) {
|
|
||||||
std::vector<llama_token> tokens;
|
|
||||||
tokens.push_back(id);
|
|
||||||
return eval_tokens(ctx_llama, tokens, 1, n_past);
|
|
||||||
}
|
|
||||||
|
|
||||||
static bool eval_string(struct llama_context * ctx_llama, const char* str, int n_batch, int * n_past, bool add_bos){
|
|
||||||
std::string str2 = str;
|
|
||||||
std::vector<llama_token> embd_inp = common_tokenize(ctx_llama, str2, add_bos, true);
|
|
||||||
return eval_tokens(ctx_llama, embd_inp, n_batch, n_past);
|
|
||||||
}
|
|
||||||
|
|
||||||
static void process_eval_image_embed(struct llava_context * ctx_llava, const struct llava_image_embed * embeds, int n_batch, int * n_past, int idx) {
|
|
||||||
float * image_embed = (float *)malloc(clip_embd_nbytes(ctx_llava->ctx_clip));
|
|
||||||
std::memcpy(image_embed, embeds->embed + idx * clip_n_patches(ctx_llava->ctx_clip) * clip_n_mmproj_embd(ctx_llava->ctx_clip), clip_embd_nbytes(ctx_llava->ctx_clip));
|
|
||||||
|
|
||||||
auto * slice_embed = (llava_image_embed*)malloc(sizeof(llava_image_embed));
|
|
||||||
slice_embed->embed = image_embed;
|
|
||||||
slice_embed->n_image_pos = clip_n_patches(ctx_llava->ctx_clip);
|
|
||||||
llava_eval_image_embed(ctx_llava->ctx_llama, slice_embed, n_batch, n_past);
|
|
||||||
llava_image_embed_free(slice_embed);
|
|
||||||
}
|
|
||||||
|
|
||||||
static void process_image(struct llava_context * ctx_llava, struct llava_image_embed * embeds, common_params * params, int &n_past) {
|
|
||||||
std::string system_prompt;
|
|
||||||
int idx = 0;
|
|
||||||
int num_image_embeds = embeds->n_image_pos / clip_n_patches(ctx_llava->ctx_clip);
|
|
||||||
int has_minicpmv_projector = clip_is_minicpmv(ctx_llava->ctx_clip);
|
|
||||||
if (has_minicpmv_projector == 2) {
|
|
||||||
system_prompt = "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n";
|
|
||||||
}
|
|
||||||
else if (has_minicpmv_projector == 3) {
|
|
||||||
system_prompt = "<|im_start|>user\n";
|
|
||||||
}
|
|
||||||
else if (has_minicpmv_projector == 4) {
|
|
||||||
system_prompt = "<|im_start|>user\n";
|
|
||||||
}
|
|
||||||
LOG_INF("%s: image token past: %d\n", __func__, n_past);
|
|
||||||
eval_string(ctx_llava->ctx_llama, (system_prompt+"<image>").c_str(), params->n_batch, &n_past, false);
|
|
||||||
process_eval_image_embed(ctx_llava, embeds, params->n_batch, &n_past, idx++);
|
|
||||||
eval_string(ctx_llava->ctx_llama, std::string("</image>").c_str(), params->n_batch, &n_past, false);
|
|
||||||
if (num_image_embeds > 1) {
|
|
||||||
if (has_minicpmv_projector == 2) {
|
|
||||||
size_t num_image_embeds_col = clip_uhd_num_image_embeds_col(ctx_llava->ctx_clip);
|
|
||||||
eval_string(ctx_llava->ctx_llama, std::string("<slice>").c_str(), params->n_batch, &n_past, false);
|
|
||||||
for (size_t i = 0; i < (num_image_embeds-1)/num_image_embeds_col; ++i) {
|
|
||||||
for (size_t j = 0; j < num_image_embeds_col; ++j) {
|
|
||||||
eval_string(ctx_llava->ctx_llama, std::string("<image>").c_str(), params->n_batch, &n_past, false);
|
|
||||||
process_eval_image_embed(ctx_llava, embeds, params->n_batch, &n_past, idx++);
|
|
||||||
eval_string(ctx_llava->ctx_llama, std::string("</image>").c_str(), params->n_batch, &n_past, false);
|
|
||||||
if (j == num_image_embeds_col - 1) {
|
|
||||||
eval_string(ctx_llava->ctx_llama, std::string("\n").c_str(), params->n_batch, &n_past, false);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
eval_string(ctx_llava->ctx_llama, std::string("</slice>").c_str(), params->n_batch, &n_past, false);
|
|
||||||
}
|
|
||||||
else if (has_minicpmv_projector == 3 || has_minicpmv_projector == 4) {
|
|
||||||
size_t num_image_embeds_col = clip_uhd_num_image_embeds_col(ctx_llava->ctx_clip);
|
|
||||||
for (size_t i = 0; i < (num_image_embeds-1)/num_image_embeds_col; ++i) {
|
|
||||||
for (size_t j = 0; j < num_image_embeds_col; ++j) {
|
|
||||||
eval_string(ctx_llava->ctx_llama, std::string("<slice>").c_str(), params->n_batch, &n_past, false);
|
|
||||||
process_eval_image_embed(ctx_llava, embeds, params->n_batch, &n_past, idx++);
|
|
||||||
eval_string(ctx_llava->ctx_llama, std::string("</slice>").c_str(), params->n_batch, &n_past, false);
|
|
||||||
if (j == num_image_embeds_col - 1) {
|
|
||||||
eval_string(ctx_llava->ctx_llama, std::string("\n").c_str(), params->n_batch, &n_past, false);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
LOG_INF("%s: image token past: %d\n", __func__, n_past);
|
|
||||||
}
|
|
||||||
|
|
||||||
static const char * sample(struct common_sampler * smpl,
|
|
||||||
struct llama_context * ctx_llama,
|
|
||||||
int * n_past) {
|
|
||||||
const llama_token id = common_sampler_sample(smpl, ctx_llama, -1);
|
|
||||||
common_sampler_accept(smpl, id, true);
|
|
||||||
|
|
||||||
const llama_model * model = llama_get_model(ctx_llama);
|
|
||||||
const llama_vocab * vocab = llama_model_get_vocab(model);
|
|
||||||
|
|
||||||
static std::string ret;
|
|
||||||
if (llama_vocab_is_eog(vocab, id)) {
|
|
||||||
ret = "</s>";
|
|
||||||
} else {
|
|
||||||
ret = common_token_to_piece(ctx_llama, id);
|
|
||||||
}
|
|
||||||
eval_id(ctx_llama, id, n_past);
|
|
||||||
return ret.c_str();
|
|
||||||
}
|
|
||||||
|
|
||||||
static struct llava_context * minicpmv_init(common_params * params, const std::string & fname, int &n_past){
|
|
||||||
auto * ctx_clip = clip_init_context(params);
|
|
||||||
auto * embeds = llava_image_embed_make_with_filename(ctx_clip, params->cpuparams.n_threads, fname.c_str());
|
|
||||||
if (!embeds) {
|
|
||||||
LOG_ERR("failed to load image %s. Terminating\n\n", fname.c_str());
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
// process the prompt
|
|
||||||
if (params->prompt.empty() && params->interactive == false) {
|
|
||||||
LOG_ERR("prompt should be given or interactive mode should be on");
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
auto * model = llava_init(params);
|
|
||||||
if (model == NULL) {
|
|
||||||
fprintf(stderr, "%s: error: failed to init minicpmv model\n", __func__);
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
const int64_t t_llava_init_start_us = ggml_time_us();
|
|
||||||
auto * ctx_llava = llava_init_context(params, model);
|
|
||||||
ctx_llava->ctx_clip = ctx_clip;
|
|
||||||
const int64_t t_llava_init_end_us = ggml_time_us();
|
|
||||||
float t_llava_init_ms = (t_llava_init_end_us - t_llava_init_start_us) / 1000.0;
|
|
||||||
LOG_INF("%s: llava init in %8.2f ms.\n", __func__, t_llava_init_ms);
|
|
||||||
|
|
||||||
const int64_t t_process_image_start_us = ggml_time_us();
|
|
||||||
process_image(ctx_llava, embeds, params, n_past);
|
|
||||||
const int64_t t_process_image_end_us = ggml_time_us();
|
|
||||||
float t_process_image_ms = (t_process_image_end_us - t_process_image_start_us) / 1000.0;
|
|
||||||
LOG_INF("%s: llama process image in %8.2f ms.\n", __func__, t_process_image_ms);
|
|
||||||
|
|
||||||
llava_image_embed_free(embeds);
|
|
||||||
return ctx_llava;
|
|
||||||
}
|
|
||||||
|
|
||||||
static struct common_sampler * llama_init(struct llava_context * ctx_llava, common_params * params, const std::string & prompt, int & n_past, bool is_first = false){
|
|
||||||
std::string user_prompt = prompt;
|
|
||||||
int has_minicpmv_projector = clip_is_minicpmv(ctx_llava->ctx_clip);
|
|
||||||
if (!is_first) {
|
|
||||||
if (has_minicpmv_projector == 2) {
|
|
||||||
user_prompt = "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n" + prompt;
|
|
||||||
}
|
|
||||||
else if (has_minicpmv_projector == 3) {
|
|
||||||
user_prompt = "<|im_start|>user\n" + prompt;
|
|
||||||
}
|
|
||||||
else if (has_minicpmv_projector == 4) {
|
|
||||||
user_prompt = "<|im_start|>user\n" + prompt;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
eval_string(ctx_llava->ctx_llama, user_prompt.c_str(), params->n_batch, &n_past, false);
|
|
||||||
if (has_minicpmv_projector == 2) {
|
|
||||||
eval_string(ctx_llava->ctx_llama, "<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", params->n_batch, &n_past, false);
|
|
||||||
}
|
|
||||||
else if (has_minicpmv_projector == 3) {
|
|
||||||
eval_string(ctx_llava->ctx_llama, "<|im_end|><|im_start|>assistant\n", params->n_batch, &n_past, false);
|
|
||||||
}
|
|
||||||
else if (has_minicpmv_projector == 4) {
|
|
||||||
eval_string(ctx_llava->ctx_llama, "<|im_end|><|im_start|>assistant\n", params->n_batch, &n_past, false);
|
|
||||||
}
|
|
||||||
|
|
||||||
// generate the response
|
|
||||||
|
|
||||||
LOG_INF("\n");
|
|
||||||
|
|
||||||
struct common_sampler * smpl = common_sampler_init(ctx_llava->model, params->sampling);
|
|
||||||
return smpl;
|
|
||||||
}
|
|
||||||
|
|
||||||
static const char * llama_loop(struct llava_context * ctx_llava,struct common_sampler * smpl, int &n_past){
|
|
||||||
|
|
||||||
const char * tmp = sample(smpl, ctx_llava->ctx_llama, &n_past);
|
|
||||||
return tmp;
|
|
||||||
}
|
|
||||||
|
|
||||||
int main(int argc, char ** argv) {
|
|
||||||
ggml_time_init();
|
|
||||||
|
|
||||||
common_params params;
|
|
||||||
|
|
||||||
if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_LLAVA, show_additional_info)) {
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
common_init();
|
|
||||||
|
|
||||||
if (params.mmproj.path.empty() || (params.image.empty())) {
|
|
||||||
show_additional_info(argc, argv);
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
for (auto & image : params.image) {
|
|
||||||
int n_past = 0;
|
|
||||||
        auto * ctx_llava = minicpmv_init(&params, image, n_past);
|
|
||||||
|
|
||||||
if (!params.prompt.empty()) {
|
|
||||||
LOG("<user>%s\n", params.prompt.c_str());
|
|
||||||
LOG("<assistant>");
|
|
||||||
            auto * smpl = llama_init(ctx_llava, &params, params.prompt, n_past, true);
|
|
||||||
const int max_tgt_len = params.n_predict < 0 ? 256 : params.n_predict;
|
|
||||||
std::string response;
|
|
||||||
bool have_tmp = false;
|
|
||||||
for (int i = 0; i < max_tgt_len; i++) {
|
|
||||||
const auto * tmp = llama_loop(ctx_llava, smpl, n_past);
|
|
||||||
response += tmp;
|
|
||||||
if (strcmp(tmp, "</s>") == 0){
|
|
||||||
if (!have_tmp) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
if (strstr(tmp, "###")) break; // Yi-VL behavior
|
|
||||||
have_tmp = true;
|
|
||||||
printf("%s", tmp);
|
|
||||||
if (strstr(response.c_str(), "<user>")) break; // minicpm-v
|
|
||||||
|
|
||||||
fflush(stdout);
|
|
||||||
}
|
|
||||||
common_sampler_free(smpl);
|
|
||||||
        } else {
|
|
||||||
while (true) {
|
|
||||||
LOG("<user>");
|
|
||||||
std::string prompt;
|
|
||||||
std::getline(std::cin, prompt);
|
|
||||||
LOG("<assistant>");
|
|
||||||
                auto * smpl = llama_init(ctx_llava, &params, prompt, n_past, true);
|
|
||||||
const int max_tgt_len = params.n_predict < 0 ? 256 : params.n_predict;
|
|
||||||
std::string response;
|
|
||||||
for (int i = 0; i < max_tgt_len; i++) {
|
|
||||||
const auto * tmp = llama_loop(ctx_llava, smpl, n_past);
|
|
||||||
response += tmp;
|
|
||||||
if (strcmp(tmp, "</s>") == 0) break;
|
|
||||||
printf("%s", tmp);// mistral llava-1.6
|
|
||||||
if (strstr(response.c_str(), "<user>")) break; // minicpm-v
|
|
||||||
fflush(stdout);
|
|
||||||
}
|
|
||||||
common_sampler_free(smpl);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
printf("\n");
|
|
||||||
llama_perf_context_print(ctx_llava->ctx_llama);
|
|
||||||
|
|
||||||
ctx_llava->model = NULL;
|
|
||||||
llava_free(ctx_llava);
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
@ -1,341 +0,0 @@
|
|||||||
#include "clip.h"
|
|
||||||
#include "clip-impl.h"
|
|
||||||
#include "mtmd.h"
|
|
||||||
|
|
||||||
#include "llama.h"
|
|
||||||
|
|
||||||
#include <algorithm>
|
|
||||||
#include <cerrno>
|
|
||||||
#include <cstdio>
|
|
||||||
#include <cstdlib>
|
|
||||||
#include <cstring>
|
|
||||||
#include <limits>
|
|
||||||
#include <vector>
|
|
||||||
|
|
||||||
struct mtmd_context {
|
|
||||||
struct clip_ctx * ctx_clip;
|
|
||||||
const struct llama_model * text_model;
|
|
||||||
std::vector<float> image_embd_v; // image embedding vector
|
|
||||||
bool print_timings;
|
|
||||||
int n_threads;
|
|
||||||
std::string image_marker;
|
|
||||||
|
|
||||||
// TODO @ngxson : add timings
|
|
||||||
|
|
||||||
mtmd_context(const char * mmproj_fname,
|
|
||||||
const llama_model * text_model,
|
|
||||||
const mtmd_context_params & ctx_params) : print_timings(ctx_params.print_timings), n_threads(ctx_params.n_threads), image_marker(ctx_params.image_marker) {
|
|
||||||
clip_context_params ctx_clip_params;
|
|
||||||
ctx_clip_params.use_gpu = ctx_params.use_gpu;
|
|
||||||
ctx_clip_params.verbosity = ctx_params.verbosity;
|
|
||||||
ctx_clip = clip_init(mmproj_fname, ctx_clip_params);
|
|
||||||
if (!ctx_clip) {
|
|
||||||
throw std::runtime_error(string_format("Failed to load CLIP model from %s\n", mmproj_fname));
|
|
||||||
}
|
|
||||||
this->text_model = text_model;
|
|
||||||
}
|
|
||||||
|
|
||||||
~mtmd_context() {
|
|
||||||
clip_free(ctx_clip);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct mtmd_image_tokens_data {
|
|
||||||
clip_image_f32_batch batch_f32; // preprocessed image patches
|
|
||||||
};
|
|
||||||
|
|
||||||
struct mtmd_image_tokens {
|
|
||||||
uint32_t nx; // number of tokens in x direction
|
|
||||||
uint32_t ny; // number of tokens in y direction
|
|
||||||
uint32_t n_tokens() const { return nx * ny; }
|
|
||||||
clip_image_f32_batch batch_f32; // preprocessed image patches
|
|
||||||
};
|
|
||||||
|
|
||||||
mtmd_context * mtmd_init_from_file(const char * mmproj_fname,
|
|
||||||
const struct llama_model * text_model,
|
|
||||||
const struct mtmd_context_params ctx_params) {
|
|
||||||
try {
|
|
||||||
return new mtmd_context(mmproj_fname, text_model, ctx_params);
|
|
||||||
} catch (const std::exception & e) {
|
|
||||||
LOG_ERR("%s: error: %s\n", __func__, e.what());
|
|
||||||
return nullptr;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void mtmd_free(mtmd_context * ctx) {
|
|
||||||
if (ctx) {
|
|
||||||
delete ctx;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// copied from common_tokenize
|
|
||||||
static std::vector<llama_token> mtmd_tokenize_text_internal(
|
|
||||||
const struct llama_vocab * vocab,
|
|
||||||
const std::string & text,
|
|
||||||
bool add_special,
|
|
||||||
bool parse_special) {
|
|
||||||
// upper limit for the number of tokens
|
|
||||||
int n_tokens = text.length() + 2 * add_special;
|
|
||||||
std::vector<llama_token> result(n_tokens);
|
|
||||||
n_tokens = llama_tokenize(vocab, text.data(), text.length(), result.data(), result.size(), add_special, parse_special);
|
|
||||||
if (n_tokens < 0) {
|
|
||||||
result.resize(-n_tokens);
|
|
||||||
int check = llama_tokenize(vocab, text.data(), text.length(), result.data(), result.size(), add_special, parse_special);
|
|
||||||
GGML_ASSERT(check == -n_tokens);
|
|
||||||
} else {
|
|
||||||
result.resize(n_tokens);
|
|
||||||
}
|
|
||||||
return result;
|
|
||||||
}
|
|
||||||
|
|
||||||
mtmd_input_chunks * mtmd_tokenize(mtmd_context * ctx,
|
|
||||||
const mtmd_input_text & text,
|
|
||||||
const std::vector<mtmd_bitmap> & bitmaps) {
|
|
||||||
mtmd_input_chunks * output = new mtmd_input_chunks;
|
|
||||||
auto vocab = llama_model_get_vocab(ctx->text_model);
|
|
||||||
|
|
||||||
std::string prompt_modified(text.text);
|
|
||||||
std::string marker_modified(ctx->image_marker);
|
|
||||||
projector_type proj_type = clip_get_projector_type(ctx->ctx_clip);
|
|
||||||
// a bit hacky here, but works for now
|
|
||||||
// for some models, we need to add prefix and suffix to the image embeddings
|
|
||||||
if (proj_type == PROJECTOR_TYPE_GEMMA3) {
|
|
||||||
// <start_of_image> ... (image embeddings) ... <end_of_image>
|
|
||||||
marker_modified = "<start_of_image>" + ctx->image_marker + "<end_of_image>";
|
|
||||||
string_replace_all(prompt_modified, ctx->image_marker, marker_modified);
|
|
||||||
}
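    // For example, with the gemma3 projector and the default marker the prompt
    //   "describe this: <__image__>"
    // becomes
    //   "describe this: <start_of_image><__image__><end_of_image>"
    // before it is split on the marker below.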
|
|
||||||
|
|
||||||
    std::vector<std::string> parts = string_split_str(prompt_modified, ctx->image_marker); // split the marker-expanded prompt, not the raw input text
|
|
||||||
output->clear();
|
|
||||||
output->reserve(parts.size());
|
|
||||||
|
|
||||||
size_t i_img = 0;
|
|
||||||
|
|
||||||
for (const auto & part : parts) {
|
|
||||||
//printf("tokenizing part: %s\n", part.c_str());
|
|
||||||
        bool add_bos = &parts.front() == &part;
|
|
||||||
auto tokens = mtmd_tokenize_text_internal(vocab, part, text.add_special && add_bos, text.parse_special);
|
|
||||||
if (tokens.empty()) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
mtmd_input_chunk chunk{
|
|
||||||
MTMD_INPUT_CHUNK_TYPE_TEXT,
|
|
||||||
std::move(tokens),
|
|
||||||
{},
|
|
||||||
};
|
|
||||||
output->emplace_back(std::move(chunk));
|
|
||||||
|
|
||||||
if (&parts.back() != &part) {
|
|
||||||
// add image token to middle of 2 parts
|
|
||||||
|
|
||||||
if (i_img >= bitmaps.size()) {
|
|
||||||
LOG_ERR("%s: error: not enough images for %d parts\n", __func__, (int)parts.size());
|
|
||||||
return nullptr;
|
|
||||||
}
|
|
||||||
|
|
||||||
// shim layer
|
|
||||||
clip_image_u8_ptr img_u8(clip_image_u8_init());
|
|
||||||
img_u8->nx = bitmaps[i_img].nx;
|
|
||||||
img_u8->ny = bitmaps[i_img].ny;
|
|
||||||
img_u8->buf.resize(bitmaps[i_img].data.size());
|
|
||||||
std::memcpy(img_u8->buf.data(), bitmaps[i_img].data.data(), img_u8->nx * img_u8->ny * 3);
|
|
||||||
|
|
||||||
// preprocess image
|
|
||||||
clip_image_f32_batch batch_f32;
|
|
||||||
bool ok = clip_image_preprocess(ctx->ctx_clip, img_u8.get(), &batch_f32);
|
|
||||||
if (!ok) {
|
|
||||||
LOG_ERR("Unable to preprocess image\n");
|
|
||||||
return nullptr;
|
|
||||||
}
|
|
||||||
|
|
||||||
mtmd_image_tokens * image_tokens = new mtmd_image_tokens;
|
|
||||||
image_tokens->nx = clip_n_patches(ctx->ctx_clip); // TODO @ngxson : use clip_n_patches_by_image
|
|
||||||
image_tokens->ny = 1; // TODO
|
|
||||||
image_tokens->batch_f32 = std::move(batch_f32);
|
|
||||||
|
|
||||||
mtmd_input_chunk chunk{
|
|
||||||
MTMD_INPUT_CHUNK_TYPE_IMAGE,
|
|
||||||
{},
|
|
||||||
image_tokens,
|
|
||||||
};
|
|
||||||
output->emplace_back(std::move(chunk));
|
|
||||||
i_img++;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return output;
|
|
||||||
}
|
|
||||||
|
|
||||||
void mtmd_input_chunks_free(mtmd_input_chunks * chunks) {
|
|
||||||
for (auto & chunk : *chunks) {
|
|
||||||
if (chunk.type == MTMD_INPUT_CHUNK_TYPE_IMAGE && chunk.tokens_image) {
|
|
||||||
delete chunk.tokens_image;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
delete chunks;
|
|
||||||
}
|
|
||||||
|
|
||||||
int32_t mtmd_encode(mtmd_context * ctx, const mtmd_image_tokens * image_tokens) {
|
|
||||||
int n_mmproj_embd = clip_n_mmproj_embd(ctx->ctx_clip);
|
|
||||||
ctx->image_embd_v.resize(image_tokens->n_tokens() * n_mmproj_embd);
|
|
||||||
bool ok = clip_image_batch_encode(
|
|
||||||
ctx->ctx_clip,
|
|
||||||
ctx->n_threads,
|
|
||||||
&image_tokens->batch_f32,
|
|
||||||
ctx->image_embd_v.data());
|
|
||||||
return ok ? 0 : 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
float * mtmd_get_output_embd(mtmd_context * ctx) {
|
|
||||||
return ctx->image_embd_v.data();
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t mtmd_helper_get_n_tokens(mtmd_input_chunks * chunks) {
|
|
||||||
size_t n_tokens = 0;
|
|
||||||
for (auto & chunk : *chunks) {
|
|
||||||
if (chunk.type == MTMD_INPUT_CHUNK_TYPE_TEXT) {
|
|
||||||
n_tokens += chunk.tokens_text.size();
|
|
||||||
} else if (chunk.type == MTMD_INPUT_CHUNK_TYPE_IMAGE) {
|
|
||||||
n_tokens += chunk.tokens_image->n_tokens();
|
|
||||||
} else {
|
|
||||||
GGML_ASSERT(false && "chunk type not supported");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return n_tokens;
|
|
||||||
}
|
|
||||||
|
|
||||||
// helper struct to make working with embd batch easier
|
|
||||||
// note: this will be removed after llama_batch_ext refactoring
|
|
||||||
struct decode_embd_batch {
|
|
||||||
std::vector<llama_pos> pos;
|
|
||||||
std::vector<int32_t> n_seq_id;
|
|
||||||
std::vector<llama_seq_id> seq_id_0;
|
|
||||||
std::vector<llama_seq_id *> seq_ids;
|
|
||||||
std::vector<int8_t> logits;
|
|
||||||
llama_batch batch;
|
|
||||||
decode_embd_batch(float * embd, int32_t n_tokens, llama_pos pos_0, llama_seq_id seq_id) {
|
|
||||||
pos .resize(n_tokens);
|
|
||||||
n_seq_id.resize(n_tokens);
|
|
||||||
seq_ids .resize(n_tokens + 1);
|
|
||||||
logits .resize(n_tokens);
|
|
||||||
seq_id_0.resize(1);
|
|
||||||
seq_id_0[0] = seq_id;
|
|
||||||
seq_ids [n_tokens] = nullptr;
|
|
||||||
batch = {
|
|
||||||
/*n_tokens =*/ n_tokens,
|
|
||||||
/*tokens =*/ nullptr,
|
|
||||||
/*embd =*/ embd,
|
|
||||||
/*pos =*/ pos.data(),
|
|
||||||
/*n_seq_id =*/ n_seq_id.data(),
|
|
||||||
/*seq_id =*/ seq_ids.data(),
|
|
||||||
/*logits =*/ logits.data(),
|
|
||||||
};
|
|
||||||
for (int i = 0; i < n_tokens; i++) {
|
|
||||||
batch.pos [i] = pos_0 + i;
|
|
||||||
batch.n_seq_id[i] = 1;
|
|
||||||
batch.seq_id [i] = seq_id_0.data();
|
|
||||||
batch.logits [i] = false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
int32_t mtmd_helper_eval(mtmd_context * ctx,
|
|
||||||
llama_context * lctx,
|
|
||||||
mtmd_input_chunks * chunks,
|
|
||||||
llama_pos pos0,
|
|
||||||
llama_seq_id seq_id,
|
|
||||||
int32_t n_batch) {
|
|
||||||
int32_t ret;
|
|
||||||
llama_pos n_past = pos0;
|
|
||||||
llama_batch text_batch = llama_batch_init(n_batch, 0, 1);
|
|
||||||
|
|
||||||
for (auto & chunk : *chunks) {
|
|
||||||
bool is_last = &chunk == &chunks->back();
|
|
||||||
if (chunk.type == MTMD_INPUT_CHUNK_TYPE_TEXT) {
|
|
||||||
// TODO @ngxson : may need to split into smaller batches
|
|
||||||
text_batch.n_tokens = chunk.tokens_text.size();
|
|
||||||
for (size_t i = 0; i < chunk.tokens_text.size(); i++) {
|
|
||||||
text_batch.token [i] = chunk.tokens_text[i];
|
|
||||||
text_batch.pos [i] = n_past++;
|
|
||||||
text_batch.n_seq_id[i] = 1;
|
|
||||||
text_batch.seq_id [i][0] = seq_id;
|
|
||||||
text_batch.logits [i] = false;
|
|
||||||
}
|
|
||||||
if (is_last) {
|
|
||||||
// always get logits for last input chunk
|
|
||||||
text_batch.logits[text_batch.n_tokens - 1] = true;
|
|
||||||
}
|
|
||||||
ret = llama_decode(lctx, text_batch);
|
|
||||||
if (ret != 0) {
|
|
||||||
LOG_ERR("failed to decode text\n");
|
|
||||||
llama_batch_free(text_batch);
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
} else if (chunk.type == MTMD_INPUT_CHUNK_TYPE_IMAGE) {
|
|
||||||
            GGML_ASSERT(!is_last && "logits for last image chunk is not yet supported");
|
|
||||||
GGML_ASSERT(chunk.tokens_image != nullptr);
|
|
||||||
int64_t t0 = ggml_time_ms();
|
|
||||||
if (ctx->print_timings) {
|
|
||||||
LOG_INF("encoding image...\n");
|
|
||||||
}
|
|
||||||
ret = mtmd_encode(ctx, chunk.tokens_image);
|
|
||||||
if (ret != 0) {
|
|
||||||
LOG_ERR("failed to encode image\n");
|
|
||||||
llama_batch_free(text_batch);
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
if (ctx->print_timings) {
|
|
||||||
LOG_INF("image encoded in %" PRId64 " ms\n", ggml_time_ms() - t0);
|
|
||||||
}
|
|
||||||
|
|
||||||
int32_t n_tokens = chunk.tokens_image->n_tokens();
|
|
||||||
float * embd = mtmd_get_output_embd(ctx);
|
|
||||||
decode_embd_batch batch_img(embd, n_tokens, n_past, 0);
|
|
||||||
int64_t t1 = ggml_time_ms();
|
|
||||||
ret = llama_decode(lctx, batch_img.batch);
|
|
||||||
if (ret != 0) {
|
|
||||||
LOG_ERR("failed to decode image\n");
|
|
||||||
llama_batch_free(text_batch);
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
if (ctx->print_timings) {
|
|
||||||
LOG_INF("image decoded in %" PRId64 " ms\n", ggml_time_ms() - t1);
|
|
||||||
}
|
|
||||||
|
|
||||||
n_past += n_tokens;
|
|
||||||
|
|
||||||
} else {
|
|
||||||
GGML_ASSERT(false && "chunk type not supported");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
llama_batch_free(text_batch);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int32_t mtmd_helper_bitmap_init_from_buf(const unsigned char * buf, size_t len, mtmd_bitmap & output) {
|
|
||||||
clip_image_u8_ptr img_u8(clip_image_u8_init());
|
|
||||||
bool ok = clip_image_load_from_bytes(buf, len, img_u8.get());
|
|
||||||
if (!ok) {
|
|
||||||
LOG_ERR("Unable to load image from buffer\n");
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
unsigned char * data = clip_image_u8_get_data(img_u8.get(), &output.nx, &output.ny);
|
|
||||||
output.data.resize(output.nx * output.ny * 3);
|
|
||||||
std::memcpy(output.data.data(), data, output.nx * output.ny * 3);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int32_t mtmd_helper_bitmap_init_from_file(const char * fname, mtmd_bitmap & output) {
|
|
||||||
clip_image_u8_ptr img_u8(clip_image_u8_init());
|
|
||||||
bool ok = clip_image_load_from_file(fname, img_u8.get());
|
|
||||||
if (!ok) {
|
|
||||||
LOG_ERR("Unable to load image %s\n", fname);
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
unsigned char * data = clip_image_u8_get_data(img_u8.get(), &output.nx, &output.ny);
|
|
||||||
output.data.resize(output.nx * output.ny * 3);
|
|
||||||
std::memcpy(output.data.data(), data, output.nx * output.ny * 3);
|
|
||||||
return 0;
|
|
||||||
}
|
|
@ -1,146 +0,0 @@
|
|||||||
#ifndef MTMD_H
|
|
||||||
#define MTMD_H
|
|
||||||
|
|
||||||
#include "ggml.h"
|
|
||||||
#include "llama.h"
|
|
||||||
#include "clip.h"
|
|
||||||
|
|
||||||
#include <vector>
|
|
||||||
#include <cinttypes>
|
|
||||||
#include <memory>
|
|
||||||
|
|
||||||
#ifdef LLAMA_SHARED
|
|
||||||
# if defined(_WIN32) && !defined(__MINGW32__)
|
|
||||||
# ifdef LLAMA_BUILD
|
|
||||||
# define MTMD_API __declspec(dllexport)
|
|
||||||
# else
|
|
||||||
# define MTMD_API __declspec(dllimport)
|
|
||||||
# endif
|
|
||||||
# else
|
|
||||||
# define MTMD_API __attribute__ ((visibility ("default")))
|
|
||||||
# endif
|
|
||||||
#else
|
|
||||||
# define MTMD_API
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#ifdef __cplusplus
|
|
||||||
|
|
||||||
enum mtmd_input_chunk_type {
|
|
||||||
MTMD_INPUT_CHUNK_TYPE_TEXT,
|
|
||||||
MTMD_INPUT_CHUNK_TYPE_IMAGE,
|
|
||||||
};
|
|
||||||
|
|
||||||
struct mtmd_context;
|
|
||||||
struct mtmd_image_tokens;
|
|
||||||
|
|
||||||
// represents raw image data, layout is RGBRGBRGB...
|
|
||||||
// length of data must be nx * ny * 3
|
|
||||||
struct mtmd_bitmap {
|
|
||||||
uint32_t nx;
|
|
||||||
uint32_t ny;
|
|
||||||
std::vector<unsigned char> data;
|
|
||||||
};
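// e.g. a 640x480 RGB image occupies 640*480*3 = 921600 bytes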
|
|
||||||
|
|
||||||
struct mtmd_input_chunk {
|
|
||||||
mtmd_input_chunk_type type;
|
|
||||||
std::vector<llama_token> tokens_text;
|
|
||||||
mtmd_image_tokens * tokens_image = nullptr;
|
|
||||||
};
|
|
||||||
|
|
||||||
using mtmd_input_chunks = std::vector<mtmd_input_chunk>;
|
|
||||||
|
|
||||||
struct mtmd_context_params {
|
|
||||||
bool use_gpu = true;
|
|
||||||
bool print_timings = true;
|
|
||||||
int n_threads = 4;
|
|
||||||
enum ggml_log_level verbosity = GGML_LOG_LEVEL_INFO;
|
|
||||||
const char * image_marker = "<__image__>";
|
|
||||||
};
|
|
||||||
|
|
||||||
struct mtmd_input_text {
|
|
||||||
std::string text;
|
|
||||||
bool add_special;
|
|
||||||
bool parse_special;
|
|
||||||
};
|
|
||||||
|
|
||||||
// initialize the mtmd context
|
|
||||||
// return nullptr on failure
|
|
||||||
MTMD_API mtmd_context * mtmd_init_from_file(const char * mmproj_fname,
|
|
||||||
const llama_model * text_model,
|
|
||||||
const mtmd_context_params ctx_params);
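// A minimal initialization sketch (the mmproj path is a placeholder and text_model is
// assumed to have been loaded with llama_model_load_from_file()):
/*
    mtmd_context_params params;   // defaults: GPU enabled, 4 threads, "<__image__>" marker
    mtmd_context * ctx = mtmd_init_from_file("mmproj-model-f16.gguf", text_model, params);
    if (ctx == nullptr) {
        // failed to load the multimodal projector
    }
*/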
|
|
||||||
|
|
||||||
MTMD_API void mtmd_free(mtmd_context * ctx);
|
|
||||||
|
|
||||||
// tokenize an input text prompt and an image
|
|
||||||
// the prompt must have the input image marker (default: "<__image__>") in it
|
|
||||||
// the marker will be replaced with the image tokens
|
|
||||||
// for example:
|
|
||||||
// "here is an image: <__image__>\ndescribe it in detail."
|
|
||||||
// this will give 3 chunks:
|
|
||||||
// 1. "here is an image: <start_of_image>"
|
|
||||||
// 2. (image tokens)
|
|
||||||
// 3. "<end_of_image>\ndescribe it in detail."
|
|
||||||
// number of bitmaps must be equal to the number of image markers in the prompt
|
|
||||||
// this function is thread-safe (shared ctx)
|
|
||||||
MTMD_API mtmd_input_chunks * mtmd_tokenize(mtmd_context * ctx,
|
|
||||||
const mtmd_input_text & text,
|
|
||||||
const std::vector<mtmd_bitmap> & bitmaps);
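// A minimal usage sketch (assumes ctx and bitmap were created successfully, e.g. with
// mtmd_init_from_file() and mtmd_helper_bitmap_init_from_file() below):
/*
    mtmd_input_text text;
    text.text          = "here is an image: <__image__>\ndescribe it in detail.";
    text.add_special   = true;
    text.parse_special = true;

    std::vector<mtmd_bitmap> bitmaps = { bitmap };
    mtmd_input_chunks * chunks = mtmd_tokenize(ctx, text, bitmaps);
    if (chunks != nullptr) {
        // feed the chunks to the model, e.g. with mtmd_helper_eval()
        mtmd_input_chunks_free(chunks);
    }
*/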
|
|
||||||
|
|
||||||
// free image chunk data
|
|
||||||
MTMD_API void mtmd_input_chunks_free(mtmd_input_chunks * chunks);
|
|
||||||
|
|
||||||
// returns 0 on success
|
|
||||||
MTMD_API int32_t mtmd_encode(mtmd_context * ctx,
|
|
||||||
const mtmd_image_tokens * image_tokens);
|
|
||||||
|
|
||||||
// get output embeddings from the last encode pass
|
|
||||||
MTMD_API float * mtmd_get_output_embd(mtmd_context * ctx);
|
|
||||||
|
|
||||||
//
|
|
||||||
// helper functions (can be implemented based on other functions)
|
|
||||||
//
|
|
||||||
|
|
||||||
// helper to count the total number of tokens from a list of chunks, useful to keep track of n_past
|
|
||||||
MTMD_API size_t mtmd_helper_get_n_tokens(mtmd_input_chunks * chunks);
|
|
||||||
|
|
||||||
// helper function that automatically:
|
|
||||||
// 1. run llama_decode() on text chunks
|
|
||||||
// 2. run mtmd_encode() on image chunks, then mtmd_get_output_embd() and then llama_decode()
|
|
||||||
// if any of the mtmd_encode() or llama_decode() calls return non-zero, stop and forward the error
|
|
||||||
// otherwise, returns 0 on success
|
|
||||||
MTMD_API int32_t mtmd_helper_eval(mtmd_context * ctx,
|
|
||||||
llama_context * lctx,
|
|
||||||
mtmd_input_chunks * chunks,
|
|
||||||
llama_pos pos0,
|
|
||||||
llama_seq_id seq_id,
|
|
||||||
int32_t n_batch);
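// A rough end-to-end sketch (lctx is an existing llama_context, chunks come from
// mtmd_tokenize() above, and 512 is a placeholder n_batch):
/*
    if (mtmd_helper_eval(ctx, lctx, chunks, 0, 0, 512) == 0) {
        llama_pos n_past = (llama_pos) mtmd_helper_get_n_tokens(chunks);
        // continue decoding ordinary text tokens starting at n_past
    }
*/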
|
|
||||||
|
|
||||||
// helper function to construct a mtmd_bitmap from a file
|
|
||||||
// returns 0 on success
|
|
||||||
// this function is thread-safe
|
|
||||||
MTMD_API int32_t mtmd_helper_bitmap_init_from_file(const char * fname, mtmd_bitmap & output);
|
|
||||||
|
|
||||||
// helper function to construct a mtmd_bitmap from a buffer
|
|
||||||
// the buffer must be an image in format supported by stb_image (jpg, png, bmp, gif, etc.)
|
|
||||||
// returns 0 on success
|
|
||||||
// this function is thread-safe
|
|
||||||
MTMD_API int32_t mtmd_helper_bitmap_init_from_buf(const unsigned char * buf, size_t len, mtmd_bitmap & output);
|
|
||||||
|
|
||||||
// convenient unique_ptr wrappers
|
|
||||||
struct mtmd_context_deleter {
|
|
||||||
void operator()(mtmd_context * val) { mtmd_free(val); }
|
|
||||||
};
|
|
||||||
using mtmd_context_ptr = std::unique_ptr<mtmd_context, mtmd_context_deleter>;
|
|
||||||
|
|
||||||
struct mtmd_input_chunks_deleter {
|
|
||||||
void operator()(mtmd_input_chunks * val) { mtmd_input_chunks_free(val); }
|
|
||||||
};
|
|
||||||
using mtmd_input_chunks_ptr = std::unique_ptr<mtmd_input_chunks, mtmd_input_chunks_deleter>;
|
|
||||||
|
|
||||||
#else
|
|
||||||
|
|
||||||
static_assert(false && "C header is not yet supported by this library");
|
|
||||||
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#endif
|
|