forked from pool/ollama

- Update to version 0.1.45:

OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/ollama?expand=0&rev=27
Eyad Issa 2024-06-22 12:15:28 +00:00 committed by Git OBS Bridge
parent d49ff0ffe7
commit 5b2fad2c49
9 changed files with 46 additions and 27 deletions

_service

@@ -3,7 +3,7 @@
 <service name="obs_scm" mode="manual">
   <param name="url">https://github.com/ollama/ollama.git</param>
   <param name="scm">git</param>
-  <param name="revision">v0.1.44</param>
+  <param name="revision">v0.1.45</param>
   <param name="versionformat">@PARENT_TAG@</param>
   <param name="versionrewrite-pattern">v(.*)</param>
   <param name="changesgenerate">enable</param>

_servicedata

@@ -1,4 +1,4 @@
 <servicedata>
   <service name="tar_scm">
     <param name="url">https://github.com/ollama/ollama.git</param>
-    <param name="changesrevision">c39761c5525132d96e1da0956a9aa39e87b54114</param></service></servicedata>
+    <param name="changesrevision">e01e535cbbb92e0d9645bd726e259e7d8a6c7598</param></service></servicedata>

enable-lto.patch

@@ -1,30 +1,28 @@
-diff -rub ollama/llm/generate/gen_linux.sh ollama-patched/llm/generate/gen_linux.sh
---- ollama/llm/generate/gen_linux.sh 2024-04-23 04:40:58.246062467 +0200
-+++ ollama-patched/llm/generate/gen_linux.sh 2024-04-23 04:37:36.432294889 +0200
-@@ -51,7 +51,7 @@
-         export CUDACXX=$(command -v nvcc)
+diff --git a/llm/generate/gen_linux.sh b/llm/generate/gen_linux.sh
+index 28ce1f2..4193a43 100755
+--- a/llm/generate/gen_linux.sh
++++ b/llm/generate/gen_linux.sh
+@@ -52,6 +52,7 @@ if [ -z "${CUDACXX}" ]; then
      fi
  fi
--COMMON_CMAKE_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off"
-+COMMON_CMAKE_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_LTO=on -DCMAKE_BUILD_TYPE=Release -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off"
+ COMMON_CMAKE_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DLLAMA_OPENMP=off"
++COMMON_CMAKE_DEFS="${COMMON_CMAKE_DEFS} -DLLAMA_LTO=on -DCMAKE_BUILD_TYPE=Release"
  source $(dirname $0)/gen_common.sh
  init_vars
  git_module_setup
-@@ -77,7 +77,7 @@
+@@ -78,6 +79,7 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then
+     if [ -n "${OLLAMA_CUSTOM_CPU_DEFS}" ]; then
          init_vars
          echo "OLLAMA_CUSTOM_CPU_DEFS=\"${OLLAMA_CUSTOM_CPU_DEFS}\""
--        CMAKE_DEFS="${OLLAMA_CUSTOM_CPU_DEFS} -DCMAKE_POSITION_INDEPENDENT_CODE=on ${CMAKE_DEFS}"
-+        CMAKE_DEFS="${OLLAMA_CUSTOM_CPU_DEFS} -DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_LTO=on -DCMAKE_BUILD_TYPE=Release ${CMAKE_DEFS}"
+         CMAKE_DEFS="${OLLAMA_CUSTOM_CPU_DEFS} -DCMAKE_POSITION_INDEPENDENT_CODE=on ${CMAKE_DEFS}"
++        CMAKE_DEFS="${CMAKE_DEFS} -DLLAMA_LTO=on"
          BUILD_DIR="../build/linux/${ARCH}/cpu"
          echo "Building custom CPU"
          build
-@@ -93,7 +93,7 @@
+@@ -94,6 +96,7 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then
+     # -DLLAMA_AVX512_VBMI -- 2018 Intel Cannon Lake
      # -DLLAMA_AVX512_VNNI -- 2021 Intel Alder Lake
--    COMMON_CPU_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off"
-+    COMMON_CPU_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_LTO=on -DCMAKE_BUILD_TYPE=Release -DLLAMA_NATIVE=off"
+     COMMON_CPU_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_OPENMP=off"
++    COMMON_CPU_DEFS="-DLLAMA_LTO=on -DCMAKE_BUILD_TYPE=Release"
      if [ -z "${OLLAMA_CPU_TARGET}" -o "${OLLAMA_CPU_TARGET}" = "cpu" ]; then
          #
          # CPU first for the default library, set up as lowest common denominator for maximum compatibility (including Rosetta)
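The rewritten patch now appends to the upstream variables instead of restating them, so upstream additions such as -DLLAMA_OPENMP=off survive future rebases. A sketch of the append idiom, with the baseline shortened for illustration:

    # upstream baseline (abridged)
    COMMON_CMAKE_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_OPENMP=off"
    # the patch adds LTO and an explicit Release build type on top of it
    COMMON_CMAKE_DEFS="${COMMON_CMAKE_DEFS} -DLLAMA_LTO=on -DCMAKE_BUILD_TYPE=Release"
    echo "${COMMON_CMAKE_DEFS}"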

BIN  ollama-0.1.44.obscpio (Stored with Git LFS; binary file not shown)

BIN  ollama-0.1.45.obscpio (Stored with Git LFS; new file; binary file not shown)

ollama.changes

@@ -1,3 +1,24 @@
+-------------------------------------------------------------------
+Sat Jun 22 10:08:00 UTC 2024 - Eyad Issa <eyadlorenzo@gmail.com>
+
+- Update to version 0.1.45:
+  * New models: DeepSeek-Coder-V2: A 16B & 236B open-source
+    Mixture-of-Experts code language model that achieves
+    performance comparable to GPT4-Turbo in code-specific tasks.
+  * ollama show <model> will now show model information such as
+    context window size
+  * Model loading on Windows with CUDA GPUs is now faster
+  * Setting seed in the /v1/chat/completions OpenAI compatibility
+    endpoint no longer changes temperature
+  * Enhanced GPU discovery and multi-gpu support with concurrency
+  * Introduced a workaround for AMD Vega RX 56 SDMA support on
+    Linux
+  * Fix memory prediction for deepseek-v2 and deepseek-coder-v2
+    models
+  * api/show endpoint returns extensive model metadata
+  * GPU configuration variables are now reported in ollama serve
+  * Update Linux ROCm to v6.1.1
+
 -------------------------------------------------------------------
 Tue Jun 18 12:12:41 UTC 2024 - Eyad Issa <eyadlorenzo@gmail.com>
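Two of the changelog items above can be exercised directly against a running ollama serve. A hedged sketch (llama3 is only a placeholder model name, not part of this commit):

    # seed no longer overrides temperature on the OpenAI-compatible endpoint
    curl -s http://localhost:11434/v1/chat/completions -d '{
      "model": "llama3", "seed": 42, "temperature": 0.7,
      "messages": [{"role": "user", "content": "Say hi"}]
    }'

    # api/show now returns extended metadata such as context window size
    curl -s http://localhost:11434/api/show -d '{"name": "llama3"}'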

ollama.obsinfo

@@ -1,4 +1,4 @@
 name: ollama
-version: 0.1.44
-mtime: 1718310369
-commit: c39761c5525132d96e1da0956a9aa39e87b54114
+version: 0.1.45
+mtime: 1718905584
+commit: e01e535cbbb92e0d9645bd726e259e7d8a6c7598
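In ollama.obsinfo, version is the rewritten tag, commit is the upstream revision also recorded in _servicedata, and mtime is that commit's Unix timestamp, which date(1) can decode:

    date -u -d @1718905584    # Thu Jun 20 17:46:24 UTC 2024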

ollama.spec

@@ -17,7 +17,7 @@
 Name:           ollama
-Version:        0.1.44
+Version:        0.1.45
 Release:        0
 Summary:        Tool for running AI models on-premise
 License:        MIT

BIN  vendor.tar.zstd (Stored with Git LFS; binary file not shown)