Accepting request 1187407 from science:machinelearning

- Update to version 0.2.5:
- Update to version 0.2.4:
- Update to version 0.2.3:
- Update to version 0.2.2:
- Update to version 0.2.1:
- Update to version 0.2.0:

OBS-URL: https://build.opensuse.org/request/show/1187407
OBS-URL: https://build.opensuse.org/package/show/openSUSE:Factory/ollama?expand=0&rev=12
Ana Guerrero 2024-07-15 17:49:07 +00:00 committed by Git OBS Bridge
commit b2ca9b9e96
9 changed files with 66 additions and 19 deletions

_service

@@ -3,7 +3,7 @@
 <service name="obs_scm" mode="manual">
   <param name="url">https://github.com/ollama/ollama.git</param>
   <param name="scm">git</param>
-  <param name="revision">v0.1.48</param>
+  <param name="revision">v0.2.5</param>
   <param name="versionformat">@PARENT_TAG@</param>
   <param name="versionrewrite-pattern">v(.*)</param>
   <param name="changesgenerate">enable</param>

_servicedata

@@ -1,4 +1,4 @@
 <servicedata>
   <service name="tar_scm">
     <param name="url">https://github.com/ollama/ollama.git</param>
-    <param name="changesrevision">717f7229eb4f9220d4070aae617923950643d327</param></service></servicedata>
+    <param name="changesrevision">f7ee0123008dbdb3fd5954438d12196951b58b78</param></service></servicedata>


@@ -1,28 +1,28 @@
 diff --git a/llm/generate/gen_linux.sh b/llm/generate/gen_linux.sh
-index 28ce1f2..4193a43 100755
+index db2c6c3..8194cd9 100755
 --- a/llm/generate/gen_linux.sh
 +++ b/llm/generate/gen_linux.sh
 @@ -52,6 +52,7 @@ if [ -z "${CUDACXX}" ]; then
     fi
 fi
-COMMON_CMAKE_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DLLAMA_OPENMP=off"
-+COMMON_CMAKE_DEFS="${COMMON_CMAKE_DEFS} -DLLAMA_LTO=on -DCMAKE_BUILD_TYPE=Release"
+COMMON_CMAKE_DEFS="-DBUILD_SHARED_LIBS=off -DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_OPENMP=off"
++COMMON_CMAKE_DEFS="${COMMON_CMAKE_DEFS} -DGGML_LTO=on -DCMAKE_BUILD_TYPE=Release"
 source $(dirname $0)/gen_common.sh
 init_vars
 git_module_setup
 @@ -78,6 +79,7 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then
 init_vars
 echo "OLLAMA_CUSTOM_CPU_DEFS=\"${OLLAMA_CUSTOM_CPU_DEFS}\""
-CMAKE_DEFS="${OLLAMA_CUSTOM_CPU_DEFS} -DCMAKE_POSITION_INDEPENDENT_CODE=on ${CMAKE_DEFS}"
-+ CMAKE_DEFS="${CMAKE_DEFS} -DLLAMA_LTO=on"
+CMAKE_DEFS="${OLLAMA_CUSTOM_CPU_DEFS} -DBUILD_SHARED_LIBS=off -DCMAKE_POSITION_INDEPENDENT_CODE=on ${CMAKE_DEFS}"
++ CMAKE_DEFS="${CMAKE_DEFS} -DGGML_LTO=on"
 BUILD_DIR="../build/linux/${ARCH}/cpu"
 echo "Building custom CPU"
 build
 @@ -94,6 +96,7 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then
-# -DLLAMA_AVX512_VNNI -- 2021 Intel Alder Lake
-COMMON_CPU_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_OPENMP=off"
-+ COMMON_CPU_DEFS="-DLLAMA_LTO=on -DCMAKE_BUILD_TYPE=Release"
+# -DGGML_AVX512_VNNI -- 2021 Intel Alder Lake
+COMMON_CPU_DEFS="-DBUILD_SHARED_LIBS=off -DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_OPENMP=off"
++ COMMON_CPU_DEFS="-DGGML_LTO=on -DCMAKE_BUILD_TYPE=Release"
 if [ -z "${OLLAMA_CPU_TARGET}" -o "${OLLAMA_CPU_TARGET}" = "cpu" ]; then
 #
 # CPU first for the default library, set up as lowest common denominator for maximum compatibility (including Rosetta)
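The patch is simply refreshed for ollama 0.2.x, where the vendored llama.cpp renamed its build options from LLAMA_* to GGML_*; the lines it adds still only append LTO and a Release build type to the CMake definitions that gen_linux.sh assembles. A rough hand-run equivalent of the patched CPU configure step, with illustrative paths (the script derives the source and build directories itself):

    cmake -S llm/llama.cpp -B ../build/linux/x86_64/cpu \
          -DBUILD_SHARED_LIBS=off -DCMAKE_POSITION_INDEPENDENT_CODE=on \
          -DGGML_NATIVE=off -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off \
          -DGGML_FMA=off -DGGML_F16C=off -DGGML_OPENMP=off \
          -DGGML_LTO=on -DCMAKE_BUILD_TYPE=Release
    cmake --build ../build/linux/x86_64/cpu -j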

ollama-0.1.48.obscpio (deleted)

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:3ad003ac20a614eefd770e000e818729f1723b1299286479d9669211eeaed710
-size 160434702

ollama-0.2.5.obscpio (new file)

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0a19afdb4bd732dd717c5a97dc8baed30939f4cd74395c304876ef837d041d6f
+size 161660942

ollama.changes

@@ -1,4 +1,51 @@
-------------------------------------------------------------------
Sun Jul 14 17:48:36 UTC 2024 - eyadlorenzo@gmail.com
- Update to version 0.2.5:
* Fixed issue where a model's SYSTEM message would not be applied
- Update to version 0.2.4:
* Fixed issue where context, load_duration and total_duration
fields would not be set in the /api/generate endpoint.
* Ollama will no longer error if loading models larger than
system memory if disk space is available
- Update to version 0.2.3:
* Fix issue where system prompt would not be applied
- Update to version 0.2.2:
* Fixed errors that occurred when using Ollama with Nvidia V100
GPUs
* glm4 models will no longer fail to load from out of memory
errors
* Fixed error that would occur when running deepseek-v2 and
deepseek-coder-v2 models
* Fixed a series of out of memory issues when using Nvidia
GPUs
* Fixed a series of errors that would occur when using multiple
Radeon GPUs
- Update to version 0.2.1:
* Fixed issue where setting OLLAMA_NUM_PARALLEL would cause
models to be reloaded after each request
- Update to version 0.2.0:
* Ollama 0.2.0 is now available with concurrency support.
This unlocks 2 specific features:
~ Ollama can now serve multiple requests at the same time
~ Ollama now supports loading different models at the same time
* New models: GLM-4: A strong multi-lingual general language
model with competitive performance to Llama 3.
* New models: CodeGeeX4: A versatile model for AI software
development scenarios, including code completion.
* New models: Gemma 2: Improved output quality and base text
generation models now available
* Ollama will now show a better error if a model architecture
isn't supported
* Improved handling of quotes and spaces in Modelfile FROM lines
* Ollama will now return an error if the system does not have
enough memory to run a model on Linux
-------------------------------------------------------------------
Sun Jul 07 19:18:11 UTC 2024 - Eyad Issa <eyadlorenzo@gmail.com>
- Update to version 0.1.48:
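A quick way to exercise the 0.2.x behaviour described above once the updated package is installed: the concurrency knobs introduced in 0.2.0 and the /api/generate fields restored in 0.2.4. Model name and parallelism values below are only examples, not part of the package:

    OLLAMA_NUM_PARALLEL=4 OLLAMA_MAX_LOADED_MODELS=2 ollama serve &
    curl -s http://localhost:11434/api/generate -d '{
      "model": "llama3",
      "prompt": "Why is the sky blue?",
      "stream": false
    }'
    # the non-streamed response should again include context, load_duration and total_duration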

ollama.obsinfo

@@ -1,4 +1,4 @@
 name: ollama
-version: 0.1.48
-mtime: 1719628771
-commit: 717f7229eb4f9220d4070aae617923950643d327
+version: 0.2.5
+mtime: 1720908480
+commit: f7ee0123008dbdb3fd5954438d12196951b58b78

ollama.spec

@@ -17,7 +17,7 @@
 Name: ollama
-Version: 0.1.48
+Version: 0.2.5
 Release: 0
 Summary: Tool for running AI models on-premise
 License: MIT
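Apart from the version bump nothing changes in the spec, so a local rebuild against the refreshed obscpio is enough of a sanity check; repository and architecture names here are illustrative:

    osc build openSUSE_Tumbleweed x86_64 ollama.spec
    osc results    # server-side build state after the commit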

(additional LFS-tracked source archive)

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:857f1af7a25e48841716d40b5226e61f6436ae38e322fb9b50e5e7aed379ee06
-size 5307324
+oid sha256:0a7dde5a5d4e0794b5a9b5e7dd865559a6625ef387a90d2843581d008a9c5af2
+size 5355013