name: Create Release on: workflow_dispatch: # allows manual triggering inputs: create_release: description: 'Create new release' required: true type: boolean push: branches: - master paths: ['.github/workflows/release.yml', '**/CMakeLists.txt', '**/.cmake', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu', '**/*.cuh', '**/*.swift', '**/*.m', '**/*.metal', '**/*.comp'] concurrency: group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }} cancel-in-progress: true env: BRANCH_NAME: ${{ github.head_ref || github.ref_name }} CMAKE_ARGS: "-DLLAMA_BUILD_EXAMPLES=OFF -DLLAMA_BUILD_TESTS=OFF -DLLAMA_BUILD_TOOLS=ON -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON" jobs: macOS-arm64: runs-on: macos-14 steps: - name: Clone id: checkout uses: actions/checkout@v4 with: fetch-depth: 0 - name: ccache uses: hendrikmuhs/ccache-action@v1.2.16 with: key: macOS-latest-cmake-arm64 evict-old-files: 1d - name: Dependencies id: depends continue-on-error: true run: | brew update brew install curl - name: Build id: cmake_build run: | sysctl -a cmake -B build \ -DCMAKE_BUILD_RPATH="@loader_path" \ -DLLAMA_FATAL_WARNINGS=ON \ -DGGML_METAL_USE_BF16=ON \ -DGGML_METAL_EMBED_LIBRARY=ON \ -DGGML_RPC=ON \ ${{ env.CMAKE_ARGS }} cmake --build build --config Release -j $(sysctl -n hw.logicalcpu) - name: Determine tag name id: tag uses: ./.github/actions/get-tag-name - name: Pack artifacts id: pack_artifacts run: | cp LICENSE ./build/bin/ zip -r llama-${{ steps.tag.outputs.name }}-bin-macos-arm64.zip ./build/bin/* - name: Upload artifacts uses: actions/upload-artifact@v4 with: path: llama-${{ steps.tag.outputs.name }}-bin-macos-arm64.zip name: llama-bin-macos-arm64.zip macOS-x64: runs-on: macos-13 steps: - name: Clone id: checkout uses: actions/checkout@v4 with: fetch-depth: 0 - name: ccache uses: hendrikmuhs/ccache-action@v1.2.16 with: key: macOS-latest-cmake-x64 evict-old-files: 1d - name: Dependencies id: depends continue-on-error: true run: | brew update brew install curl - name: Build id: cmake_build run: | sysctl -a # Metal is disabled due to intermittent failures with Github runners not having a GPU: # https://github.com/ggml-org/llama.cpp/actions/runs/8635935781/job/23674807267#step:5:2313 cmake -B build \ -DCMAKE_BUILD_RPATH="@loader_path" \ -DLLAMA_FATAL_WARNINGS=ON \ -DGGML_METAL=OFF \ -DGGML_RPC=ON cmake --build build --config Release -j $(sysctl -n hw.logicalcpu) - name: Determine tag name id: tag uses: ./.github/actions/get-tag-name - name: Pack artifacts id: pack_artifacts run: | cp LICENSE ./build/bin/ zip -r llama-${{ steps.tag.outputs.name }}-bin-macos-x64.zip ./build/bin/* - name: Upload artifacts uses: actions/upload-artifact@v4 with: path: llama-${{ steps.tag.outputs.name }}-bin-macos-x64.zip name: llama-bin-macos-x64.zip ubuntu-22-cpu: strategy: matrix: include: - build: 'x64' os: ubuntu-22.04 - build: 'arm64' os: ubuntu-22.04-arm runs-on: ${{ matrix.os }} steps: - name: Clone id: checkout uses: actions/checkout@v4 with: fetch-depth: 0 - name: ccache uses: hendrikmuhs/ccache-action@v1.2.16 with: key: ubuntu-cpu-cmake evict-old-files: 1d - name: Dependencies id: depends run: | sudo apt-get update sudo apt-get install build-essential libcurl4-openssl-dev - name: Build id: cmake_build run: | cmake -B build \ -DLLAMA_FATAL_WARNINGS=ON \ ${{ env.CMAKE_ARGS }} cmake --build build --config Release -j $(nproc) - name: Determine tag name id: tag uses: ./.github/actions/get-tag-name - name: Pack artifacts id: pack_artifacts run: | cp LICENSE ./build/bin/ zip -r llama-${{ steps.tag.outputs.name }}-bin-ubuntu-${{ matrix.build }}.zip ./build/bin/* - name: Upload artifacts uses: actions/upload-artifact@v4 with: path: llama-${{ steps.tag.outputs.name }}-bin-ubuntu-${{ matrix.build }}.zip name: llama-bin-ubuntu-${{ matrix.build }}.zip ubuntu-22-vulkan: runs-on: ubuntu-22.04 steps: - name: Clone id: checkout uses: actions/checkout@v4 with: fetch-depth: 0 - name: ccache uses: hendrikmuhs/ccache-action@v1.2.16 with: key: ubuntu-22-cmake-vulkan evict-old-files: 1d - name: Dependencies id: depends run: | wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | sudo apt-key add - sudo wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list sudo apt-get update -y sudo apt-get install -y build-essential mesa-vulkan-drivers vulkan-sdk libcurl4-openssl-dev - name: Build id: cmake_build run: | cmake -B build \ -DGGML_VULKAN=ON \ ${{ env.CMAKE_ARGS }} cmake --build build --config Release -j $(nproc) - name: Determine tag name id: tag uses: ./.github/actions/get-tag-name - name: Pack artifacts id: pack_artifacts run: | cp LICENSE ./build/bin/ zip -r llama-${{ steps.tag.outputs.name }}-bin-ubuntu-vulkan-x64.zip ./build/bin/* - name: Upload artifacts uses: actions/upload-artifact@v4 with: path: llama-${{ steps.tag.outputs.name }}-bin-ubuntu-vulkan-x64.zip name: llama-bin-ubuntu-vulkan-x64.zip windows: runs-on: windows-latest env: OPENBLAS_VERSION: 0.3.23 VULKAN_VERSION: 1.4.309.0 strategy: matrix: include: - build: 'cpu-x64' defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/x64-windows-llvm.cmake -DGGML_NATIVE=OFF -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON -DGGML_OPENMP=OFF' #- build: 'openblas-x64' # defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/x64-windows-llvm.cmake -DGGML_NATIVE=OFF -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON -DGGML_OPENMP=OFF -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS -DBLAS_INCLUDE_DIRS="$env:RUNNER_TEMP/openblas/include" -DBLAS_LIBRARIES="$env:RUNNER_TEMP/openblas/lib/openblas.lib"' - build: 'vulkan-x64' defines: '-DGGML_NATIVE=OFF -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON -DGGML_VULKAN=ON' - build: 'cpu-arm64' defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DGGML_NATIVE=OFF' - build: 'opencl-adreno-arm64' defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DCMAKE_PREFIX_PATH="$env:RUNNER_TEMP/opencl-arm64-release" -DGGML_OPENCL=ON -DGGML_OPENCL_USE_ADRENO_KERNELS=ON' steps: - name: Clone id: checkout uses: actions/checkout@v4 with: fetch-depth: 0 - name: ccache uses: hendrikmuhs/ccache-action@v1.2.16 with: key: windows-latest-cmake-${{ matrix.build }} variant: ccache evict-old-files: 1d - name: Download OpenBLAS id: get_openblas if: ${{ matrix.build == 'openblas-x64' }} run: | curl.exe -o $env:RUNNER_TEMP/openblas.zip -L "https://github.com/xianyi/OpenBLAS/releases/download/v${env:OPENBLAS_VERSION}/OpenBLAS-${env:OPENBLAS_VERSION}-x64.zip" curl.exe -o $env:RUNNER_TEMP/OpenBLAS.LICENSE.txt -L "https://github.com/xianyi/OpenBLAS/raw/v${env:OPENBLAS_VERSION}/LICENSE" mkdir $env:RUNNER_TEMP/openblas tar.exe -xvf $env:RUNNER_TEMP/openblas.zip -C $env:RUNNER_TEMP/openblas $vcdir = $(vswhere -latest -products * -requires Microsoft.VisualStudio.Component.VC.Tools.x86.x64 -property installationPath) $msvc = $(join-path $vcdir $('VC\Tools\MSVC\'+$(gc -raw $(join-path $vcdir 'VC\Auxiliary\Build\Microsoft.VCToolsVersion.default.txt')).Trim())) $lib = $(join-path $msvc 'bin\Hostx64\x64\lib.exe') & $lib /machine:x64 "/def:${env:RUNNER_TEMP}/openblas/lib/libopenblas.def" "/out:${env:RUNNER_TEMP}/openblas/lib/openblas.lib" /name:openblas.dll - name: Install Vulkan SDK id: get_vulkan if: ${{ matrix.build == 'vulkan-x64' }} run: | curl.exe -o $env:RUNNER_TEMP/VulkanSDK-Installer.exe -L "https://sdk.lunarg.com/sdk/download/${env:VULKAN_VERSION}/windows/VulkanSDK-${env:VULKAN_VERSION}-Installer.exe" & "$env:RUNNER_TEMP\VulkanSDK-Installer.exe" --accept-licenses --default-answer --confirm-command install Add-Content $env:GITHUB_ENV "VULKAN_SDK=C:\VulkanSDK\${env:VULKAN_VERSION}" Add-Content $env:GITHUB_PATH "C:\VulkanSDK\${env:VULKAN_VERSION}\bin" - name: Install Ninja id: install_ninja run: | choco install ninja - name: Install OpenCL Headers and Libs id: install_opencl if: ${{ matrix.build == 'opencl-adreno-arm64' }} run: | git clone https://github.com/KhronosGroup/OpenCL-Headers cd OpenCL-Headers cmake -B build ` -DBUILD_TESTING=OFF ` -DOPENCL_HEADERS_BUILD_TESTING=OFF ` -DOPENCL_HEADERS_BUILD_CXX_TESTS=OFF ` -DCMAKE_INSTALL_PREFIX="$env:RUNNER_TEMP/opencl-arm64-release" cmake --build build --target install git clone https://github.com/KhronosGroup/OpenCL-ICD-Loader cd OpenCL-ICD-Loader cmake -B build-arm64-release ` -A arm64 ` -DCMAKE_PREFIX_PATH="$env:RUNNER_TEMP/opencl-arm64-release" ` -DCMAKE_INSTALL_PREFIX="$env:RUNNER_TEMP/opencl-arm64-release" cmake --build build-arm64-release --target install --config release - name: libCURL id: get_libcurl uses: ./.github/actions/windows-setup-curl - name: Build id: cmake_build env: CURL_PATH: ${{ steps.get_libcurl.outputs.curl_path }} run: | cmake -S . -B build ${{ matrix.defines }} ` -DCURL_LIBRARY="$env:CURL_PATH/lib/libcurl.dll.a" -DCURL_INCLUDE_DIR="$env:CURL_PATH/include" ` ${{ env.CMAKE_ARGS }} cmake --build build --config Release -j ${env:NUMBER_OF_PROCESSORS} - name: Add libopenblas.dll id: add_libopenblas_dll if: ${{ matrix.build == 'openblas-x64' }} run: | cp $env:RUNNER_TEMP/openblas/bin/libopenblas.dll ./build/bin/Release/openblas.dll cp $env:RUNNER_TEMP/OpenBLAS.LICENSE.txt ./build/bin/Release/OpenBLAS-${env:OPENBLAS_VERSION}.txt - name: Determine tag name id: tag uses: ./.github/actions/get-tag-name - name: Pack artifacts id: pack_artifacts env: CURL_PATH: ${{ steps.get_libcurl.outputs.curl_path }} run: | Copy-Item $env:CURL_PATH\bin\libcurl-x64.dll .\build\bin\Release\libcurl-x64.dll 7z a llama-${{ steps.tag.outputs.name }}-bin-win-${{ matrix.build }}.zip .\build\bin\Release\* - name: Upload artifacts uses: actions/upload-artifact@v4 with: path: llama-${{ steps.tag.outputs.name }}-bin-win-${{ matrix.build }}.zip name: llama-bin-win-${{ matrix.build }}.zip windows-cuda: runs-on: windows-2019 strategy: matrix: cuda: ['12.4', '11.7'] steps: - name: Clone id: checkout uses: actions/checkout@v4 with: fetch-depth: 0 - name: Install ccache uses: hendrikmuhs/ccache-action@v1.2.16 with: key: windows-cuda-${{ matrix.cuda }} variant: ccache evict-old-files: 1d - name: Install Cuda Toolkit uses: ./.github/actions/windows-setup-cuda with: cuda_version: ${{ matrix.cuda }} - name: Install Ninja id: install_ninja run: | choco install ninja - name: libCURL id: get_libcurl uses: ./.github/actions/windows-setup-curl - name: Build id: cmake_build shell: cmd env: CURL_PATH: ${{ steps.get_libcurl.outputs.curl_path }} run: | call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat" cmake -S . -B build -G "Ninja Multi-Config" ^ -DGGML_NATIVE=OFF ^ -DGGML_BACKEND_DL=ON ^ -DGGML_CPU_ALL_VARIANTS=ON ^ -DGGML_CUDA=ON ^ -DCURL_LIBRARY="%CURL_PATH%/lib/libcurl.dll.a" -DCURL_INCLUDE_DIR="%CURL_PATH%/include" ^ ${{ env.CMAKE_ARGS }} set /A NINJA_JOBS=%NUMBER_OF_PROCESSORS%-1 cmake --build build --config Release -j %NINJA_JOBS% -t ggml cmake --build build --config Release - name: Determine tag name id: tag uses: ./.github/actions/get-tag-name - name: Pack artifacts id: pack_artifacts env: CURL_PATH: ${{ steps.get_libcurl.outputs.curl_path }} run: | cp $env:CURL_PATH\bin\libcurl-x64.dll .\build\bin\Release\libcurl-x64.dll 7z a llama-${{ steps.tag.outputs.name }}-bin-win-cuda${{ matrix.cuda }}-x64.zip .\build\bin\Release\* - name: Upload artifacts uses: actions/upload-artifact@v4 with: path: llama-${{ steps.tag.outputs.name }}-bin-win-cuda${{ matrix.cuda }}-x64.zip name: llama-bin-win-cuda${{ matrix.cuda }}-x64.zip - name: Copy and pack Cuda runtime run: | echo "Cuda install location: ${{ env.CUDA_PATH }}" $dst='.\build\bin\cudart\' robocopy "${{env.CUDA_PATH}}\bin" $dst cudart64_*.dll cublas64_*.dll cublasLt64_*.dll robocopy "${{env.CUDA_PATH}}\lib" $dst cudart64_*.dll cublas64_*.dll cublasLt64_*.dll 7z a cudart-llama-bin-win-cuda${{ matrix.cuda }}-x64.zip $dst\* - name: Upload Cuda runtime uses: actions/upload-artifact@v4 with: path: cudart-llama-bin-win-cuda${{ matrix.cuda }}-x64.zip name: cudart-llama-bin-win-cuda${{ matrix.cuda }}-x64.zip windows-sycl: runs-on: windows-latest defaults: run: shell: bash env: WINDOWS_BASEKIT_URL: https://registrationcenter-download.intel.com/akdlm/IRC_NAS/b380d914-366b-4b77-a74a-05e3c38b3514/intel-oneapi-base-toolkit-2025.0.0.882_offline.exe WINDOWS_DPCPP_MKL: intel.oneapi.win.cpp-dpcpp-common:intel.oneapi.win.mkl.devel:intel.oneapi.win.dnnl:intel.oneapi.win.tbb.devel ONEAPI_ROOT: "C:/Program Files (x86)/Intel/oneAPI" steps: - name: Clone id: checkout uses: actions/checkout@v4 with: fetch-depth: 0 - name: ccache uses: hendrikmuhs/ccache-action@v1.2.16 with: key: windows-latest-cmake-sycl variant: ccache evict-old-files: 1d - name: Install run: | scripts/install-oneapi.bat $WINDOWS_BASEKIT_URL $WINDOWS_DPCPP_MKL # TODO: add libcurl support ; we will also need to modify win-build-sycl.bat to accept user-specified args - name: Build id: cmake_build run: examples/sycl/win-build-sycl.bat - name: Determine tag name id: tag uses: ./.github/actions/get-tag-name - name: Build the release package id: pack_artifacts run: | echo "cp oneAPI running time dll files in ${{ env.ONEAPI_ROOT }} to ./build/bin" cp "${{ env.ONEAPI_ROOT }}/mkl/latest/bin/mkl_sycl_blas.5.dll" ./build/bin cp "${{ env.ONEAPI_ROOT }}/mkl/latest/bin/mkl_core.2.dll" ./build/bin cp "${{ env.ONEAPI_ROOT }}/mkl/latest/bin/mkl_tbb_thread.2.dll" ./build/bin cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_adapter_level_zero.dll" ./build/bin cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_adapter_opencl.dll" ./build/bin cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_loader.dll" ./build/bin cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_win_proxy_loader.dll" ./build/bin cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/sycl8.dll" ./build/bin cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/svml_dispmd.dll" ./build/bin cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/libmmd.dll" ./build/bin cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/libiomp5md.dll" ./build/bin cp "${{ env.ONEAPI_ROOT }}/dnnl/latest/bin/dnnl.dll" ./build/bin cp "${{ env.ONEAPI_ROOT }}/tbb/latest/bin/tbb12.dll" ./build/bin echo "cp oneAPI running time dll files to ./build/bin done" 7z a llama-${{ steps.tag.outputs.name }}-bin-win-sycl-x64.zip ./build/bin/* - name: Upload the release package uses: actions/upload-artifact@v4 with: path: llama-${{ steps.tag.outputs.name }}-bin-win-sycl-x64.zip name: llama-bin-win-sycl-x64.zip windows-hip: runs-on: windows-latest strategy: matrix: gpu_target: [gfx1100, gfx1101, gfx1030] steps: - name: Clone id: checkout uses: actions/checkout@v4 with: fetch-depth: 0 - name: Clone rocWMMA repository id: clone_rocwmma run: | git clone https://github.com/rocm/rocwmma --branch rocm-6.2.4 --depth 1 - name: ccache uses: hendrikmuhs/ccache-action@v1.2.16 with: key: windows-latest-cmake-hip-release evict-old-files: 1d - name: Install id: depends run: | $ErrorActionPreference = "Stop" write-host "Downloading AMD HIP SDK Installer" Invoke-WebRequest -Uri "https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-24.Q3-WinSvr2022-For-HIP.exe" -OutFile "${env:RUNNER_TEMP}\rocm-install.exe" write-host "Installing AMD HIP SDK" Start-Process "${env:RUNNER_TEMP}\rocm-install.exe" -ArgumentList '-install' -NoNewWindow -Wait write-host "Completed AMD HIP SDK installation" - name: Verify ROCm id: verify run: | & 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' --version - name: libCURL id: get_libcurl uses: ./.github/actions/windows-setup-curl - name: Build id: cmake_build env: CURL_PATH: ${{ steps.get_libcurl.outputs.curl_path }} run: | $env:HIP_PATH=$(Resolve-Path 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' | split-path | split-path) $env:CMAKE_PREFIX_PATH="${env:HIP_PATH}" cmake -G "Unix Makefiles" -B build -S . ` -DCMAKE_C_COMPILER="${env:HIP_PATH}\bin\clang.exe" ` -DCMAKE_CXX_COMPILER="${env:HIP_PATH}\bin\clang++.exe" ` -DCMAKE_CXX_FLAGS="-I$($PWD.Path.Replace('\', '/'))/rocwmma/library/include/" ` -DCMAKE_BUILD_TYPE=Release ` -DAMDGPU_TARGETS=${{ matrix.gpu_target }} ` -DGGML_HIP_ROCWMMA_FATTN=ON ` -DGGML_HIP=ON ` -DCURL_LIBRARY="$env:CURL_PATH/lib/libcurl.dll.a" -DCURL_INCLUDE_DIR="$env:CURL_PATH/include" ` ${{ env.CMAKE_ARGS }} cmake --build build -j ${env:NUMBER_OF_PROCESSORS} md "build\bin\rocblas\library\" cp "${env:HIP_PATH}\bin\hipblas.dll" "build\bin\" cp "${env:HIP_PATH}\bin\rocblas.dll" "build\bin\" cp "${env:HIP_PATH}\bin\rocblas\library\*" "build\bin\rocblas\library\" - name: Determine tag name id: tag uses: ./.github/actions/get-tag-name - name: Pack artifacts id: pack_artifacts env: CURL_PATH: ${{ steps.get_libcurl.outputs.curl_path }} run: | cp $env:CURL_PATH\bin\libcurl-x64.dll .\build\bin\libcurl-x64.dll 7z a llama-${{ steps.tag.outputs.name }}-bin-win-hip-x64-${{ matrix.gpu_target }}.zip .\build\bin\* - name: Upload artifacts uses: actions/upload-artifact@v4 with: path: llama-${{ steps.tag.outputs.name }}-bin-win-hip-x64-${{ matrix.gpu_target }}.zip name: llama-bin-win-hip-x64-${{ matrix.gpu_target }}.zip ios-xcode-build: runs-on: macos-latest steps: - name: Checkout code uses: actions/checkout@v4 with: fetch-depth: 0 - name: Build id: cmake_build run: | sysctl -a cmake -B build -G Xcode \ -DGGML_METAL_USE_BF16=ON \ -DGGML_METAL_EMBED_LIBRARY=ON \ -DLLAMA_CURL=OFF \ -DLLAMA_BUILD_EXAMPLES=OFF \ -DLLAMA_BUILD_TOOLS=OFF \ -DLLAMA_BUILD_TESTS=OFF \ -DLLAMA_BUILD_SERVER=OFF \ -DCMAKE_SYSTEM_NAME=iOS \ -DCMAKE_OSX_DEPLOYMENT_TARGET=14.0 \ -DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=ggml cmake --build build --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO - name: xcodebuild for swift package id: xcodebuild run: | ./build-xcframework.sh - name: Build Xcode project run: xcodebuild -project examples/llama.swiftui/llama.swiftui.xcodeproj -scheme llama.swiftui -sdk iphoneos CODE_SIGNING_REQUIRED=NO CODE_SIGN_IDENTITY= -destination 'generic/platform=iOS' FRAMEWORK_FOLDER_PATH=./build-ios build - name: Determine tag name id: tag uses: ./.github/actions/get-tag-name - name: Pack artifacts id: pack_artifacts run: | zip --symlinks -r llama-${{ steps.tag.outputs.name }}-xcframework.zip build-apple/llama.xcframework - name: Upload artifacts uses: actions/upload-artifact@v4 with: path: llama-${{ steps.tag.outputs.name }}-xcframework.zip name: llama-${{ steps.tag.outputs.name }}-xcframework release: if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} # Fine-grant permission # https://docs.github.com/en/actions/security-for-github-actions/security-guides/automatic-token-authentication#modifying-the-permissions-for-the-github_token permissions: contents: write # for creating release runs-on: ubuntu-latest needs: - ubuntu-22-cpu - ubuntu-22-vulkan - windows - windows-cuda - windows-sycl - windows-hip - macOS-arm64 - macOS-x64 steps: - name: Clone id: checkout uses: actions/checkout@v4 with: fetch-depth: 0 - name: Determine tag name id: tag uses: ./.github/actions/get-tag-name - name: Download artifacts id: download-artifact uses: actions/download-artifact@v4 with: path: ./artifact - name: Move artifacts id: move_artifacts run: mkdir -p ./artifact/release && mv ./artifact/*/*.zip ./artifact/release - name: Create release id: create_release uses: ggml-org/action-create-release@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: tag_name: ${{ steps.tag.outputs.name }} - name: Upload release id: upload_release uses: actions/github-script@v3 with: github-token: ${{secrets.GITHUB_TOKEN}} script: | const path = require('path'); const fs = require('fs'); const release_id = '${{ steps.create_release.outputs.id }}'; for (let file of await fs.readdirSync('./artifact/release')) { if (path.extname(file) === '.zip') { console.log('uploadReleaseAsset', file); await github.repos.uploadReleaseAsset({ owner: context.repo.owner, repo: context.repo.repo, release_id: release_id, name: file, data: await fs.readFileSync(`./artifact/release/${file}`) }); } }