llama: end-to-end tests (#19802)
* tests: add end-to-end tests per model architecture
* fixup for rebase
* fix use-after-free in llama-model-loader.cpp
* fix CI
* fix WebGPU
* disable CI for macOS-latest-cmake-arm64
* use expert_weights_scale only if != 0.0f
* comments
This commit is contained in:
Executable
+18
@@ -0,0 +1,18 @@
|
||||
#!/usr/bin/env bash
#
# Configure and build the llama-results target in a throwaway build tree,
# then run it. Arguments starting with -D are forwarded to cmake; all
# other arguments are forwarded to the llama-results binary.
#
# Usage: script.sh [-DCMAKE_VAR=VALUE ...] [llama-results args ...]

# Abort on any failed step: running a stale binary after a failed
# configure/build would silently produce wrong results.
set -euo pipefail

cmake_args=()
llama_results_args=()

# Partition the command line: -D* flags belong to cmake, the rest to
# llama-results.
for arg in "$@"; do
    if [[ "$arg" == -D* ]]; then
        cmake_args+=("$arg")
    else
        llama_results_args+=("$arg")
    fi
done

dir="build-bisect"

# Start from a clean build tree each run.
rm -rf -- "$dir"

# BUG FIX: the original used unquoted ${cmake_args}, which expands to only
# the first array element — extra -D flags were dropped. Expand with [@],
# quoted; the ${arr[@]+...} form keeps bash < 4.4 happy under `set -u`
# when the array is empty.
cmake -B "$dir" -S . ${cmake_args[@]+"${cmake_args[@]}"} > /dev/null
cmake --build "$dir" -t llama-results -j "$(nproc)" > /dev/null
"$dir"/bin/llama-results ${llama_results_args[@]+"${llama_results_args[@]}"}
|
||||
Reference in New Issue
Block a user