Eyad Issa
c97461a42d
* openai: align chat temperature and frequency_penalty options with completion (#6688)
* docs: improve linux install documentation (#6683)
* openai: don't scale temperature or frequency_penalty (#6514)
* readme: add Archyve to community integrations (#6680)
* readme: add Plasmoid Ollama Control to community integrations (#6681)
* Improve logging on GPU too small (#6666)
* openai: fix "presence_penalty" typo and add test (#6665)
* Fix gemma2 2b conversion (#6645)
* Document uninstall on windows (#6663)
* Revert "Detect running in a container (#6495)" (#6662)
* llm: make load time stall duration configurable via OLLAMA_LOAD_TIMEOUT (see the usage sketch after this list)
* Introduce GPU Overhead env var (#5922)
* Detect running in a container (#6495)
* readme: add AiLama to the list of community integrations (#4957)
* Update gpu.md: Add RTX 3050 Ti and RTX 3050 Ti (#5888)
* server: fix blob download when receiving a 200 response (#6656)
* readme: add Gentoo package manager entry to community integrations (#5714)
* Update install.sh: Replace "command -v" with encapsulated functionality (#6035)
* readme: include Enchanted for Apple Vision Pro (#4949)
* readme: add lsp-ai to community integrations (#5063)
* readme: add ollama-php library to community integrations (#6361)
* readme: add vnc-lm discord bot community integration (#6644)
* llm: use json.hpp from common (#6642)
* readme: add confichat to community integrations (#6378)
* docs: add group to manual Linux instructions and verify service is running (#6430)
* readme: add gollm to the list of community libraries (#6099)
* readme: add Cherry Studio to community integrations (#6633)
* readme: add Go fun package (#6421)
* docs: fix spelling error (#6391)

OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/ollama?expand=0&rev=49
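The two runtime tunables called out in the list above are environment variables read by the ollama server. A minimal usage sketch, with the caveat that only OLLAMA_LOAD_TIMEOUT is named in this commit message; the OLLAMA_GPU_OVERHEAD variable name and both value formats are assumptions, not taken from this commit:

# Raise the model-load stall timeout and reserve extra VRAM headroom.
# OLLAMA_LOAD_TIMEOUT is assumed to accept a Go-style duration (e.g. "10m");
# OLLAMA_GPU_OVERHEAD is assumed to be a byte count per GPU. Adjust to taste.
OLLAMA_LOAD_TIMEOUT=10m OLLAMA_GPU_OVERHEAD=536870912 ollama serve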
Diff (21 lines, 1.3 KiB)
--- a/llm/generate/gen_linux.sh.orig	2024-09-17 12:52:41.511508050 +0200
+++ b/llm/generate/gen_linux.sh	2024-09-17 13:01:55.316347171 +0200
@@ -52,7 +52,7 @@
         export CUDACXX=$(command -v nvcc)
     fi
 fi
-COMMON_CMAKE_DEFS="-DCMAKE_SKIP_RPATH=on -DBUILD_SHARED_LIBS=on -DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_OPENMP=off"
+COMMON_CMAKE_DEFS="-DCMAKE_SKIP_RPATH=on -DBUILD_SHARED_LIBS=on -DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_OPENMP=off -DGGML_LTO=on"
 source $(dirname $0)/gen_common.sh
 init_vars
 git_module_setup
@@ -95,7 +95,7 @@
     #  -DGGML_AVX512_VBMI -- 2018 Intel Cannon Lake
     #  -DGGML_AVX512_VNNI -- 2021 Intel Alder Lake
 
-    COMMON_CPU_DEFS="-DBUILD_SHARED_LIBS=on -DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_OPENMP=off"
+    COMMON_CPU_DEFS="-DBUILD_SHARED_LIBS=on -DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_OPENMP=off -DGGML_LTO=on"
     if [ -z "${OLLAMA_CPU_TARGET}" -o "${OLLAMA_CPU_TARGET}" = "cpu" ]; then
         #
         # CPU first for the default library, set up as lowest common denominator for maximum compatibility (including Rosetta)
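The only functional change in this patch is appending -DGGML_LTO=on to both CMake definition strings, asking the bundled llama.cpp/ggml build to compile and link with link-time optimization. A minimal sketch of how one might verify the flag takes effect, assuming a standalone llama.cpp/ggml checkout rather than ollama's wrapped build (the paths and second define are illustrative, not part of the patch):

# Configure with the same flag, then look for the LTO option in the flags
# CMake generated; with GCC/Clang, enabled interprocedural optimization
# typically surfaces as -flto in the per-target flags files.
cmake -S . -B build -DGGML_LTO=on -DGGML_NATIVE=off
grep -r -- '-flto' build/CMakeFiles | head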