diff --git a/_service b/_service
index fa763d3..87537f4 100644
--- a/_service
+++ b/_service
@@ -3,7 +3,7 @@
     <param name="url">https://github.com/ollama/ollama.git</param>
     <param name="scm">git</param>
-    <param name="revision">v0.1.44</param>
+    <param name="revision">v0.1.45</param>
     <param name="versionformat">@PARENT_TAG@</param>
     <param name="versionrewrite-pattern">v(.*)</param>
     <param name="changesgenerate">enable</param>
   </service>
diff --git a/_servicedata b/_servicedata
index 01cb1df..9746b08 100644
--- a/_servicedata
+++ b/_servicedata
@@ -1,4 +1,4 @@
 <servicedata>
 <service name="tar_scm">
 <param name="url">https://github.com/ollama/ollama.git</param>
-<param name="changesrevision">c39761c5525132d96e1da0956a9aa39e87b54114</param></service></servicedata>
\ No newline at end of file
+<param name="changesrevision">e01e535cbbb92e0d9645bd726e259e7d8a6c7598</param></service></servicedata>
\ No newline at end of file
diff --git a/enable-lto.patch b/enable-lto.patch
index ae8ea1a..cae786c 100644
--- a/enable-lto.patch
+++ b/enable-lto.patch
@@ -1,30 +1,32 @@
-diff -rub ollama/llm/generate/gen_linux.sh ollama-patched/llm/generate/gen_linux.sh
---- ollama/llm/generate/gen_linux.sh 2024-04-23 04:40:58.246062467 +0200
-+++ ollama-patched/llm/generate/gen_linux.sh 2024-04-23 04:37:36.432294889 +0200
-@@ -51,7 +51,7 @@
- export CUDACXX=$(command -v nvcc)
+diff --git a/llm/generate/gen_linux.sh b/llm/generate/gen_linux.sh
+index 28ce1f2..4193a43 100755
+--- a/llm/generate/gen_linux.sh
++++ b/llm/generate/gen_linux.sh
+@@ -52,6 +52,9 @@ if [ -z "${CUDACXX}" ]; then
fi
fi
--COMMON_CMAKE_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off"
-+COMMON_CMAKE_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_LTO=on -DCMAKE_BUILD_TYPE=Release -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off"
+ COMMON_CMAKE_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DLLAMA_OPENMP=off"
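++# Packaging note: extend the upstream flag set instead of replacing it,
++# so the settings above (PIC on, AVX tuning, OpenMP off) stay in effect.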
++COMMON_CMAKE_DEFS="${COMMON_CMAKE_DEFS} -DLLAMA_LTO=on -DCMAKE_BUILD_TYPE=Release"
source $(dirname $0)/gen_common.sh
init_vars
git_module_setup
-@@ -77,7 +77,7 @@
- if [ -n "${OLLAMA_CUSTOM_CPU_DEFS}" ]; then
+@@ -78,6 +81,7 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then
init_vars
echo "OLLAMA_CUSTOM_CPU_DEFS=\"${OLLAMA_CUSTOM_CPU_DEFS}\""
-- CMAKE_DEFS="${OLLAMA_CUSTOM_CPU_DEFS} -DCMAKE_POSITION_INDEPENDENT_CODE=on ${CMAKE_DEFS}"
-+ CMAKE_DEFS="${OLLAMA_CUSTOM_CPU_DEFS} -DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_LTO=on -DCMAKE_BUILD_TYPE=Release ${CMAKE_DEFS}"
+ CMAKE_DEFS="${OLLAMA_CUSTOM_CPU_DEFS} -DCMAKE_POSITION_INDEPENDENT_CODE=on ${CMAKE_DEFS}"
++ CMAKE_DEFS="${CMAKE_DEFS} -DLLAMA_LTO=on -DCMAKE_BUILD_TYPE=Release"
BUILD_DIR="../build/linux/${ARCH}/cpu"
echo "Building custom CPU"
build
-@@ -93,7 +93,7 @@
- # -DLLAMA_AVX512_VBMI -- 2018 Intel Cannon Lake
+@@ -94,6 +98,9 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then
# -DLLAMA_AVX512_VNNI -- 2021 Intel Alder Lake
-
-- COMMON_CPU_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off"
-+ COMMON_CPU_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_LTO=on -DCMAKE_BUILD_TYPE=Release -DLLAMA_NATIVE=off"
+
+ COMMON_CPU_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_OPENMP=off"
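++ # Same pattern as above: append so the PIC/NATIVE/OPENMP settings on
++ # the preceding line are not lost when LTO is enabled.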
++ COMMON_CPU_DEFS="${COMMON_CPU_DEFS} -DLLAMA_LTO=on -DCMAKE_BUILD_TYPE=Release"
if [ -z "${OLLAMA_CPU_TARGET}" -o "${OLLAMA_CPU_TARGET}" = "cpu" ]; then
#
# CPU first for the default library, set up as lowest common denominator for maximum compatibility (including Rosetta)
diff --git a/ollama-0.1.44.obscpio b/ollama-0.1.44.obscpio
deleted file mode 100644
index 902b85a..0000000
--- a/ollama-0.1.44.obscpio
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:5558006700a3829e4aa3abac5b598bf836b109961b776b63a2bf536638e8e699
-size 155404814
diff --git a/ollama-0.1.45.obscpio b/ollama-0.1.45.obscpio
new file mode 100644
index 0000000..d08649d
--- /dev/null
+++ b/ollama-0.1.45.obscpio
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ecc23b875cd051a46ed9c9da0481bfd1a1b11e859b63ceb782d673a6534bda5e
+size 189517838
diff --git a/ollama.changes b/ollama.changes
index 6776826..3e0e423 100644
--- a/ollama.changes
+++ b/ollama.changes
@@ -1,3 +1,24 @@
+-------------------------------------------------------------------
+Sat Jun 22 10:08:00 UTC 2024 - Eyad Issa
+
+- Update to version 0.1.45:
+  * New model: DeepSeek-Coder-V2, a 16B & 236B open-source
+    Mixture-of-Experts code language model that achieves
+    performance comparable to GPT4-Turbo in code-specific tasks
+  * ollama show now displays model information such as the
+    context window size
+ * Model loading on Windows with CUDA GPUs is now faster
+ * Setting seed in the /v1/chat/completions OpenAI compatibility
+ endpoint no longer changes temperature
+  * Enhanced GPU discovery and multi-GPU support with concurrency
+ * Introduced a workaround for AMD Vega RX 56 SDMA support on
+ Linux
+  * Fixed memory prediction for deepseek-v2 and deepseek-coder-v2
+    models
+ * api/show endpoint returns extensive model metadata
+ * GPU configuration variables are now reported in ollama serve
+  * Updated Linux ROCm to v6.1.1
+
-------------------------------------------------------------------
Tue Jun 18 12:12:41 UTC 2024 - Eyad Issa
diff --git a/ollama.obsinfo b/ollama.obsinfo
index de2a58d..15190a4 100644
--- a/ollama.obsinfo
+++ b/ollama.obsinfo
@@ -1,4 +1,4 @@
name: ollama
-version: 0.1.44
-mtime: 1718310369
-commit: c39761c5525132d96e1da0956a9aa39e87b54114
+version: 0.1.45
+mtime: 1718905584
+commit: e01e535cbbb92e0d9645bd726e259e7d8a6c7598
diff --git a/ollama.spec b/ollama.spec
index b31cc4f..e48c8f0 100644
--- a/ollama.spec
+++ b/ollama.spec
@@ -17,7 +17,7 @@
Name: ollama
-Version: 0.1.44
+Version: 0.1.45
Release: 0
Summary: Tool for running AI models on-premise
License: MIT
diff --git a/vendor.tar.zstd b/vendor.tar.zstd
index a706ad8..f1c4179 100644
--- a/vendor.tar.zstd
+++ b/vendor.tar.zstd
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:c237ad965be69a774d3c48a34950391ff225737aa034aac0db88c04328121bbd
-size 5307629
+oid sha256:bd09eae14f52b19ab71daa5129e2a51856457ab1af391a40e869835759114f8a
+size 5307431