ollama/enable-lto.patch
Eyad Issa 2808304cf4 - Update to version 0.3.12:
  * Llama 3.2: Meta's Llama 3.2 goes small with 1B and 3B
    models.
  * Qwen 2.5 Coder: the latest series of code-specific Qwen
    models, with significant improvements in code generation,
    code reasoning, and code fixing.
  * Ollama now supports ARM-based Windows machines.
  * Fixed a rare issue where Ollama would report a missing .dll
    file on Windows.
  * Fixed a performance issue on Windows machines without GPUs.

OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/ollama?expand=0&rev=53
2024-09-29 21:30:54 +00:00

Diff

--- a/llm/generate/gen_linux.sh.orig 2024-09-17 12:52:41.511508050 +0200
+++ b/llm/generate/gen_linux.sh 2024-09-17 13:01:55.316347171 +0200
@@ -52,7 +52,7 @@
         export CUDACXX=$(command -v nvcc)
     fi
 fi
-COMMON_CMAKE_DEFS="-DCMAKE_SKIP_RPATH=on -DBUILD_SHARED_LIBS=on -DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_OPENMP=off"
+COMMON_CMAKE_DEFS="-DCMAKE_SKIP_RPATH=on -DBUILD_SHARED_LIBS=on -DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_OPENMP=off -DGGML_LTO=on"
 source $(dirname $0)/gen_common.sh
 init_vars
 git_module_setup
@@ -95,7 +95,7 @@
     # -DGGML_AVX512_VBMI -- 2018 Intel Cannon Lake
     # -DGGML_AVX512_VNNI -- 2021 Intel Alder Lake
-    COMMON_CPU_DEFS="-DBUILD_SHARED_LIBS=on -DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_OPENMP=off"
+    COMMON_CPU_DEFS="-DBUILD_SHARED_LIBS=on -DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_OPENMP=off -DGGML_LTO=on"
     if [ -z "${OLLAMA_CPU_TARGET}" -o "${OLLAMA_CPU_TARGET}" = "cpu" ]; then
         #
         # CPU first for the default library, set up as lowest common denominator for maximum compatibility (including Rosetta)
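
The flag added in both hunks, -DGGML_LTO=on, asks the bundled ggml/llama.cpp
build for link-time optimization. As a hedged sketch of how a GGML_LTO-style
option is commonly wired into CMake's interprocedural optimization (IPO)
support -- the real llama.cpp/ggml CMakeLists.txt may differ in detail -- it
amounts to roughly this:

    # Illustrative sketch only, not copied from llama.cpp's build files
    option(GGML_LTO "ggml: enable link time optimization" OFF)

    if (GGML_LTO)
        include(CheckIPOSupported)
        check_ipo_supported(RESULT result OUTPUT output)
        if (result)
            # Enables -flto (or the toolchain's equivalent) for all targets
            set(CMAKE_INTERPROCEDURAL_OPTIMIZATION TRUE)
        else()
            message(WARNING "IPO is not supported: ${output}")
        endif()
    endif()

Passing -DGGML_LTO=on on the cmake command line, as the patched
COMMON_CMAKE_DEFS and COMMON_CPU_DEFS now do, flips that option on, trading
longer link times for smaller and potentially faster runner binaries.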