diff --git a/_service b/_service
index f078d48..c1ea1fa 100644
--- a/_service
+++ b/_service
@@ -3,7 +3,7 @@
https://github.com/ollama/ollama.git
git
- v0.1.48
+ v0.2.5
@PARENT_TAG@
v(.*)
enable
diff --git a/_servicedata b/_servicedata
index 782684c..ce1c968 100644
--- a/_servicedata
+++ b/_servicedata
@@ -1,4 +1,4 @@
https://github.com/ollama/ollama.git
- 717f7229eb4f9220d4070aae617923950643d327
\ No newline at end of file
+ f7ee0123008dbdb3fd5954438d12196951b58b78
\ No newline at end of file
diff --git a/enable-lto.patch b/enable-lto.patch
index cae786c..c018f14 100644
--- a/enable-lto.patch
+++ b/enable-lto.patch
@@ -1,28 +1,28 @@
diff --git a/llm/generate/gen_linux.sh b/llm/generate/gen_linux.sh
-index 28ce1f2..4193a43 100755
+index db2c6c3..8194cd9 100755
--- a/llm/generate/gen_linux.sh
+++ b/llm/generate/gen_linux.sh
@@ -52,6 +52,7 @@ if [ -z "${CUDACXX}" ]; then
fi
fi
- COMMON_CMAKE_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DLLAMA_OPENMP=off"
-+COMMON_CMAKE_DEFS="${COMMON_CMAKE_DEFS} -DLLAMA_LTO=on -DCMAKE_BUILD_TYPE=Release"
+ COMMON_CMAKE_DEFS="-DBUILD_SHARED_LIBS=off -DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_OPENMP=off"
++COMMON_CMAKE_DEFS="${COMMON_CMAKE_DEFS} -DGGML_LTO=on -DCMAKE_BUILD_TYPE=Release"
source $(dirname $0)/gen_common.sh
init_vars
git_module_setup
@@ -78,6 +79,7 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then
init_vars
echo "OLLAMA_CUSTOM_CPU_DEFS=\"${OLLAMA_CUSTOM_CPU_DEFS}\""
- CMAKE_DEFS="${OLLAMA_CUSTOM_CPU_DEFS} -DCMAKE_POSITION_INDEPENDENT_CODE=on ${CMAKE_DEFS}"
-+ CMAKE_DEFS="${CMAKE_DEFS} -DLLAMA_LTO=on"
+ CMAKE_DEFS="${OLLAMA_CUSTOM_CPU_DEFS} -DBUILD_SHARED_LIBS=off -DCMAKE_POSITION_INDEPENDENT_CODE=on ${CMAKE_DEFS}"
++ CMAKE_DEFS="${CMAKE_DEFS} -DGGML_LTO=on"
BUILD_DIR="../build/linux/${ARCH}/cpu"
echo "Building custom CPU"
build
@@ -94,6 +96,7 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then
- # -DLLAMA_AVX512_VNNI -- 2021 Intel Alder Lake
+ # -DGGML_AVX512_VNNI -- 2021 Intel Alder Lake
- COMMON_CPU_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_OPENMP=off"
-+ COMMON_CPU_DEFS="-DLLAMA_LTO=on -DCMAKE_BUILD_TYPE=Release"
+ COMMON_CPU_DEFS="-DBUILD_SHARED_LIBS=off -DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_OPENMP=off"
++ COMMON_CPU_DEFS="-DGGML_LTO=on -DCMAKE_BUILD_TYPE=Release"
if [ -z "${OLLAMA_CPU_TARGET}" -o "${OLLAMA_CPU_TARGET}" = "cpu" ]; then
#
# CPU first for the default library, set up as lowest common denominator for maximum compatibility (including Rosetta)
diff --git a/ollama-0.1.48.obscpio b/ollama-0.1.48.obscpio
deleted file mode 100644
index 544c67e..0000000
--- a/ollama-0.1.48.obscpio
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:3ad003ac20a614eefd770e000e818729f1723b1299286479d9669211eeaed710
-size 160434702
diff --git a/ollama-0.2.5.obscpio b/ollama-0.2.5.obscpio
new file mode 100644
index 0000000..77a32dd
--- /dev/null
+++ b/ollama-0.2.5.obscpio
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0a19afdb4bd732dd717c5a97dc8baed30939f4cd74395c304876ef837d041d6f
+size 161660942
diff --git a/ollama.changes b/ollama.changes
index e47a57e..ff029f2 100644
--- a/ollama.changes
+++ b/ollama.changes
@@ -1,4 +1,51 @@
-------------------------------------------------------------------
+Sun Jul 14 17:48:36 UTC 2024 - eyadlorenzo@gmail.com
+
+- Update to version 0.2.5:
+  * Fixed issue where a model's SYSTEM message would not be applied
+
+- Update to version 0.2.4:
+ * Fixed issue where context, load_duration and total_duration
+ fields would not be set in the /api/generate endpoint.
+ * Ollama will no longer error if loading models larger than
+ system memory if disk space is available
+
+- Update to version 0.2.3:
+ * Fix issue where system prompt would not be applied
+
+- Update to version 0.2.2:
+ * Fixed errors that occurred when using Ollama with Nvidia V100
+ GPUs
+ * glm4 models will no longer fail to load from out of memory
+ errors
+ * Fixed error that would occur when running deepseek-v2 and
+ deepseek-coder-v2 models
+ * Fixed a series of out of memory issues when using Nvidia
+ GPUs
+ * Fixed a series of errors that would occur when using multiple
+ Radeon GPUs
+
+- Update to version 0.2.1:
+ * Fixed issue where setting OLLAMA_NUM_PARALLEL would cause
+ models to be reloaded after each request
+
+- Update to version 0.2.0:
+ * Ollama 0.2.0 is now available with concurrency support.
+ This unlocks 2 specific features:
+ ~ Ollama can now serve multiple requests at the same time
+ ~ Ollama now supports loading different models at the same time
+ * New models: GLM-4: A strong multi-lingual general language
+ model with competitive performance to Llama 3.
+ * New models: CodeGeeX4: A versatile model for AI software
+ development scenarios, including code completion.
+ * New models: Gemma 2: Improved output quality and base text
+ generation models now available
+ * Ollama will now show a better error if a model architecture
+ isn't supported
+ * Improved handling of quotes and spaces in Modelfile FROM lines
+ * Ollama will now return an error if the system does not have
+ enough memory to run a model on Linux
+-------------------------------------------------------------------
Sun Jul 07 19:18:11 UTC 2024 - Eyad Issa
- Update to version 0.1.48:
diff --git a/ollama.obsinfo b/ollama.obsinfo
index 742073d..f4b72f3 100644
--- a/ollama.obsinfo
+++ b/ollama.obsinfo
@@ -1,4 +1,4 @@
name: ollama
-version: 0.1.48
-mtime: 1719628771
-commit: 717f7229eb4f9220d4070aae617923950643d327
+version: 0.2.5
+mtime: 1720908480
+commit: f7ee0123008dbdb3fd5954438d12196951b58b78
diff --git a/ollama.spec b/ollama.spec
index e862f70..9dfadf4 100644
--- a/ollama.spec
+++ b/ollama.spec
@@ -17,7 +17,7 @@
Name: ollama
-Version: 0.1.48
+Version: 0.2.5
Release: 0
Summary: Tool for running AI models on-premise
License: MIT
diff --git a/vendor.tar.zstd b/vendor.tar.zstd
index 1a62430..aa4d859 100644
--- a/vendor.tar.zstd
+++ b/vendor.tar.zstd
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:857f1af7a25e48841716d40b5226e61f6436ae38e322fb9b50e5e7aed379ee06
-size 5307324
+oid sha256:0a7dde5a5d4e0794b5a9b5e7dd865559a6625ef387a90d2843581d008a9c5af2
+size 5355013