commit f2bb339091

Accepting request 1183991 from science:machinelearning

Automatic submission by obs-autosubmit

OBS-URL: https://build.opensuse.org/request/show/1183991
OBS-URL: https://build.opensuse.org/package/show/openSUSE:Factory/ollama?expand=0&rev=10
_service
@@ -3,7 +3,7 @@
   <service name="obs_scm" mode="manual">
     <param name="url">https://github.com/ollama/ollama.git</param>
     <param name="scm">git</param>
-    <param name="revision">v0.1.44</param>
+    <param name="revision">v0.1.45</param>
     <param name="versionformat">@PARENT_TAG@</param>
     <param name="versionrewrite-pattern">v(.*)</param>
     <param name="changesgenerate">enable</param>
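The obs_scm service above runs with mode="manual", so bumping the revision tag does not fetch anything by itself. A hedged sketch of the usual local workflow (standard osc subcommands; adjust to your checkout):

    osc service manualrun   # runs the mode="manual" obs_scm service above
    osc status              # the regenerated ollama-0.1.45.obscpio should appear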
_servicedata
@@ -1,4 +1,4 @@
 <servicedata>
   <service name="tar_scm">
     <param name="url">https://github.com/ollama/ollama.git</param>
-    <param name="changesrevision">c39761c5525132d96e1da0956a9aa39e87b54114</param></service></servicedata>
+    <param name="changesrevision">e01e535cbbb92e0d9645bd726e259e7d8a6c7598</param></service></servicedata>
(LTO patch against llm/generate/gen_linux.sh, regenerated in git format for v0.1.45)
@@ -1,30 +1,28 @@
-diff -rub ollama/llm/generate/gen_linux.sh ollama-patched/llm/generate/gen_linux.sh
---- ollama/llm/generate/gen_linux.sh  2024-04-23 04:40:58.246062467 +0200
-+++ ollama-patched/llm/generate/gen_linux.sh  2024-04-23 04:37:36.432294889 +0200
-@@ -51,7 +51,7 @@
- export CUDACXX=$(command -v nvcc)
+diff --git a/llm/generate/gen_linux.sh b/llm/generate/gen_linux.sh
+index 28ce1f2..4193a43 100755
+--- a/llm/generate/gen_linux.sh
++++ b/llm/generate/gen_linux.sh
+@@ -52,6 +52,7 @@ if [ -z "${CUDACXX}" ]; then
  fi
  fi
--COMMON_CMAKE_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off"
-+COMMON_CMAKE_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_LTO=on -DCMAKE_BUILD_TYPE=Release -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off"
+ COMMON_CMAKE_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DLLAMA_OPENMP=off"
++COMMON_CMAKE_DEFS="${COMMON_CMAKE_DEFS} -DLLAMA_LTO=on -DCMAKE_BUILD_TYPE=Release"
  source $(dirname $0)/gen_common.sh
  init_vars
  git_module_setup
-@@ -77,7 +77,7 @@
+@@ -78,6 +79,7 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then
  if [ -n "${OLLAMA_CUSTOM_CPU_DEFS}" ]; then
  init_vars
  echo "OLLAMA_CUSTOM_CPU_DEFS=\"${OLLAMA_CUSTOM_CPU_DEFS}\""
--CMAKE_DEFS="${OLLAMA_CUSTOM_CPU_DEFS} -DCMAKE_POSITION_INDEPENDENT_CODE=on ${CMAKE_DEFS}"
-+CMAKE_DEFS="${OLLAMA_CUSTOM_CPU_DEFS} -DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_LTO=on -DCMAKE_BUILD_TYPE=Release ${CMAKE_DEFS}"
+ CMAKE_DEFS="${OLLAMA_CUSTOM_CPU_DEFS} -DCMAKE_POSITION_INDEPENDENT_CODE=on ${CMAKE_DEFS}"
++CMAKE_DEFS="${CMAKE_DEFS} -DLLAMA_LTO=on"
  BUILD_DIR="../build/linux/${ARCH}/cpu"
  echo "Building custom CPU"
  build
-@@ -93,7 +93,7 @@
+@@ -94,6 +96,7 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then
  # -DLLAMA_AVX512_VBMI -- 2018 Intel Cannon Lake
  # -DLLAMA_AVX512_VNNI -- 2021 Intel Alder Lake
 
--COMMON_CPU_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off"
-+COMMON_CPU_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_LTO=on -DCMAKE_BUILD_TYPE=Release -DLLAMA_NATIVE=off"
+ COMMON_CPU_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_OPENMP=off"
++COMMON_CPU_DEFS="${COMMON_CPU_DEFS} -DLLAMA_LTO=on -DCMAKE_BUILD_TYPE=Release"
  if [ -z "${OLLAMA_CPU_TARGET}" -o "${OLLAMA_CPU_TARGET}" = "cpu" ]; then
  #
  # CPU first for the default library, set up as lowest common denominator for maximum compatibility (including Rosetta)
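The regenerated patch no longer rewrites upstream's whole flag assignment; it appends the LTO flags on a separate line, so upstream changes to the assignment (such as the new -DLLAMA_OPENMP=off) no longer cause patch conflicts. A minimal sketch of the idiom, with placeholder values:

    # upstream-owned line, free to change between releases
    COMMON_CMAKE_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off"
    # distro-owned line added by the patch: append, don't replace
    COMMON_CMAKE_DEFS="${COMMON_CMAKE_DEFS} -DLLAMA_LTO=on -DCMAKE_BUILD_TYPE=Release"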
ollama-0.1.44.obscpio (deleted)
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:5558006700a3829e4aa3abac5b598bf836b109961b776b63a2bf536638e8e699
-size 155404814
ollama-0.1.45.obscpio (new file)
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ecc23b875cd051a46ed9c9da0481bfd1a1b11e859b63ceb782d673a6534bda5e
+size 189517838
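Both .obscpio archives are stored as Git LFS pointers, where the oid is the SHA-256 of the archive contents. A quick hedged integrity check after fetching the real file:

    sha256sum ollama-0.1.45.obscpio
    # expect: ecc23b875cd051a46ed9c9da0481bfd1a1b11e859b63ceb782d673a6534bda5e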
ollama.changes
@@ -1,3 +1,24 @@
+-------------------------------------------------------------------
+Sat Jun 22 10:08:00 UTC 2024 - Eyad Issa <eyadlorenzo@gmail.com>
+
+- Update to version 0.1.45:
+  * New models: DeepSeek-Coder-V2: A 16B & 236B open-source
+    Mixture-of-Experts code language model that achieves
+    performance comparable to GPT4-Turbo in code-specific tasks.
+  * ollama show <model> will now show model information such as
+    context window size
+  * Model loading on Windows with CUDA GPUs is now faster
+  * Setting seed in the /v1/chat/completions OpenAI compatibility
+    endpoint no longer changes temperature
+  * Enhanced GPU discovery and multi-gpu support with concurrency
+  * Introduced a workaround for AMD Vega RX 56 SDMA support on
+    Linux
+  * Fix memory prediction for deepseek-v2 and deepseek-coder-v2
+    models
+  * api/show endpoint returns extensive model metadata
+  * GPU configuration variables are now reported in ollama serve
+  * Update Linux ROCm to v6.1.1
+
 -------------------------------------------------------------------
 Tue Jun 18 12:12:41 UTC 2024 - Eyad Issa <eyadlorenzo@gmail.com>
 
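One of the listed fixes concerns the OpenAI-compatible endpoint. A hedged way to exercise it against a local ollama serve (the model name is an assumption; use any model you have pulled):

    curl http://localhost:11434/v1/chat/completions -d '{
      "model": "deepseek-coder-v2",
      "messages": [{"role": "user", "content": "Say hi"}],
      "seed": 42,
      "temperature": 0.7
    }'
    # with 0.1.45, setting "seed" no longer overrides "temperature"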
ollama.obsinfo
@@ -1,4 +1,4 @@
 name: ollama
-version: 0.1.44
-mtime: 1718310369
-commit: c39761c5525132d96e1da0956a9aa39e87b54114
+version: 0.1.45
+mtime: 1718905584
+commit: e01e535cbbb92e0d9645bd726e259e7d8a6c7598
ollama.spec
@@ -17,7 +17,7 @@
 
 
 Name:           ollama
-Version:        0.1.44
+Version:        0.1.45
 Release:        0
 Summary:        Tool for running AI models on-premise
 License:        MIT
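With only Version: bumped in the spec, a local test build is the usual next step. A hedged sketch (repository and architecture are assumptions; pick ones enabled for the project):

    osc build openSUSE_Tumbleweed x86_64 ollama.spec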
(another Git LFS pointer; file name not shown in this view)
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c237ad965be69a774d3c48a34950391ff225737aa034aac0db88c04328121bbd
-size 5307629
+oid sha256:bd09eae14f52b19ab71daa5129e2a51856457ab1af391a40e869835759114f8a
+size 5307431