--- a/ci/run.sh
+++ b/ci/run.sh
@@ -496,10 +496,12 @@ test $ret -eq 0 && gg_run ctest_debug
 test $ret -eq 0 && gg_run ctest_release

 if [ -z ${GG_BUILD_LOW_PERF} ]; then
-    if [ -z ${GG_BUILD_CUDA} ]; then
-        test $ret -eq 0 && gg_run open_llama_3b_v2
-    else
-        test $ret -eq 0 && gg_run open_llama_7b_v2
-    fi
+    if [ -z ${GG_BUILD_VRAM_GB} ] || [ ${GG_BUILD_VRAM_GB} -ge 8 ]; then
+        if [ -z ${GG_BUILD_CUDA} ]; then
+            test $ret -eq 0 && gg_run open_llama_3b_v2
+        else
+            test $ret -eq 0 && gg_run open_llama_7b_v2
+        fi
+    fi
 fi

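The change gates the open_llama runs in llama.cpp's ci/run.sh behind GG_BUILD_VRAM_GB: the models are fetched and run only when the variable is unset or reports at least 8 GB of VRAM. Below is a minimal sketch, not part of the commit, of how a CUDA CI host might derive and export that variable before invoking the script. The nvidia-smi query and the ./tmp/results / ./tmp/mnt paths are illustrative assumptions; GG_BUILD_CUDA, GG_BUILD_VRAM_GB and the ci/run.sh invocation follow the repository's CI setup.

#!/usr/bin/env bash
# Sketch only: derive GG_BUILD_VRAM_GB from the first GPU before running the CI script.
# Assumes nvidia-smi is available; output/mount paths are placeholders.
set -e

# Total VRAM of GPU 0 in MiB (nvidia-smi prints one value per GPU).
vram_mib=$(nvidia-smi --query-gpu=memory.total --format=csv,noheader,nounits | head -n 1)

export GG_BUILD_CUDA=1
export GG_BUILD_VRAM_GB=$(( vram_mib / 1024 ))

bash ./ci/run.sh ./tmp/results ./tmp/mnt

Note that the guard is opt-in: when GG_BUILD_VRAM_GB is left unset, the -z test succeeds and the model runs still execute, so only hosts that export the variable (as in the sketch above) skip them on low-VRAM GPUs.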