Accepting request 1201962 from science:machinelearning
OBS-URL: https://build.opensuse.org/request/show/1201962
OBS-URL: https://build.opensuse.org/package/show/openSUSE:Factory/ollama?expand=0&rev=19
commit e5b1fec77c

_service
@@ -3,7 +3,7 @@
 <service name="obs_scm" mode="manual">
   <param name="url">https://github.com/ollama/ollama.git</param>
   <param name="scm">git</param>
-  <param name="revision">v0.3.6</param>
+  <param name="revision">v0.3.10</param>
   <param name="versionformat">@PARENT_TAG@</param>
   <param name="versionrewrite-pattern">v(.*)</param>
   <param name="changesgenerate">enable</param>
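
The obs_scm service above runs only on demand (mode="manual") and, with changesgenerate enabled, records the commits between the old and the new revision into ollama.changes. A minimal sketch of refreshing the package after such a revision bump, assuming a local osc checkout with the obs_scm service installed:

    osc service manualrun   # run the mode="manual" services: fetch v0.3.10, regenerate archive and changes
    osc status              # review the regenerated files before committing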

_servicedata
@@ -1,4 +1,4 @@
 <servicedata>
   <service name="tar_scm">
     <param name="url">https://github.com/ollama/ollama.git</param>
-    <param name="changesrevision">4c4fe3f87fe1858b35bd0d41e093a0039ec4cee4</param></service></servicedata>
+    <param name="changesrevision">06d4fba851b91eb55da892d23834e8fe75096ca7</param></service></servicedata>

@@ -1,28 +1,20 @@
-diff --git a/llm/generate/gen_linux.sh b/llm/generate/gen_linux.sh
-index db2c6c3..8194cd9 100755
---- a/llm/generate/gen_linux.sh
-+++ b/llm/generate/gen_linux.sh
-@@ -52,6 +52,7 @@ if [ -z "${CUDACXX}" ]; then
+--- a/llm/generate/gen_linux.sh.orig	2024-09-17 12:52:41.511508050 +0200
++++ b/llm/generate/gen_linux.sh	2024-09-17 13:01:55.316347171 +0200
+@@ -52,7 +52,7 @@
+ export CUDACXX=$(command -v nvcc)
  fi
  fi
- COMMON_CMAKE_DEFS="-DBUILD_SHARED_LIBS=off -DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_OPENMP=off"
-+COMMON_CMAKE_DEFS="${COMMON_CMAKE_DEFS} -DGGML_LTO=on -DCMAKE_BUILD_TYPE=Release"
+-COMMON_CMAKE_DEFS="-DCMAKE_SKIP_RPATH=on -DBUILD_SHARED_LIBS=on -DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_OPENMP=off"
++COMMON_CMAKE_DEFS="-DCMAKE_SKIP_RPATH=on -DBUILD_SHARED_LIBS=on -DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_OPENMP=off -DGGML_LTO=on"
  source $(dirname $0)/gen_common.sh
  init_vars
  git_module_setup
-@@ -78,6 +79,7 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then
-     init_vars
-     echo "OLLAMA_CUSTOM_CPU_DEFS=\"${OLLAMA_CUSTOM_CPU_DEFS}\""
-     CMAKE_DEFS="${OLLAMA_CUSTOM_CPU_DEFS} -DBUILD_SHARED_LIBS=off -DCMAKE_POSITION_INDEPENDENT_CODE=on ${CMAKE_DEFS}"
-+    CMAKE_DEFS="${CMAKE_DEFS} -DGGML_LTO=on"
-     BUILD_DIR="../build/linux/${ARCH}/cpu"
-     echo "Building custom CPU"
-     build
-@@ -94,6 +96,7 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then
+@@ -95,7 +95,7 @@
+ # -DGGML_AVX512_VBMI -- 2018 Intel Cannon Lake
  # -DGGML_AVX512_VNNI -- 2021 Intel Alder Lake
- COMMON_CPU_DEFS="-DBUILD_SHARED_LIBS=off -DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_OPENMP=off"
-+COMMON_CPU_DEFS="${COMMON_CPU_DEFS} -DGGML_LTO=on -DCMAKE_BUILD_TYPE=Release"
+-COMMON_CPU_DEFS="-DBUILD_SHARED_LIBS=on -DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_OPENMP=off"
++COMMON_CPU_DEFS="-DBUILD_SHARED_LIBS=on -DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_OPENMP=off -DGGML_LTO=on"
  if [ -z "${OLLAMA_CPU_TARGET}" -o "${OLLAMA_CPU_TARGET}" = "cpu" ]; then
  #
  # CPU first for the default library, set up as lowest common denominator for maximum compatibility (including Rosetta)
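
Net effect of the rewritten patch: instead of appending separate assignments (and forcing -DCMAKE_BUILD_TYPE=Release), -DGGML_LTO=on is folded directly into the existing COMMON_CMAKE_DEFS and COMMON_CPU_DEFS lines, keeping shared libraries (-DBUILD_SHARED_LIBS=on) and rpath skipping as before. Roughly, the default CPU configure step then amounts to something like the following; this is an illustrative sketch only, as the real script computes the build directory and extra flags itself:

    # Approximate cmake invocation produced by the patched gen_linux.sh
    # for the default CPU variant (paths are examples):
    cmake -B ../build/linux/x86_64/cpu \
          -DCMAKE_SKIP_RPATH=on -DBUILD_SHARED_LIBS=on \
          -DCMAKE_POSITION_INDEPENDENT_CODE=on \
          -DGGML_NATIVE=off -DGGML_OPENMP=off -DGGML_LTO=on
    cmake --build ../build/linux/x86_64/cpu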

ollama-0.3.10.obscpio (new file)
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb0a02c484b24248cd1f5f59dd80868f034fc6d202b2b8747ca148b84c3e8ada
+size 187615246

ollama-0.3.6.obscpio (deleted)
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:96235e8d8cfdc20c732a699cc2036c907027c67482aa75fb0a7f3b6aa5089077
-size 184983566
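
The .obscpio source archives are stored through Git LFS, so the repository itself only tracks small pointer files like the ones above: the oid is the SHA-256 of the real archive and size is its byte count. A pointer of this shape can be generated or cross-checked with stock git-lfs tooling:

    # Generate the LFS pointer for a local archive:
    git lfs pointer --file=ollama-0.3.10.obscpio
    # Verify the archive against the oid recorded in the pointer:
    sha256sum ollama-0.3.10.obscpio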

ollama.changes
@@ -1,3 +1,116 @@
+-------------------------------------------------------------------
+Tue Sep 17 10:48:34 UTC 2024 - adrian@suse.de
+
+- Update to version 0.3.10:
+  * openai: align chat temperature and frequency_penalty options with completion (#6688)
+  * docs: improve linux install documentation (#6683)
+  * openai: don't scale temperature or frequency_penalty (#6514)
+  * readme: add Archyve to community integrations (#6680)
+  * readme: add Plasmoid Ollama Control to community integrations (#6681)
+  * Improve logging on GPU too small (#6666)
+  * openai: fix "presence_penalty" typo and add test (#6665)
+  * Fix gemma2 2b conversion (#6645)
+  * Document uninstall on windows (#6663)
+  * Revert "Detect running in a container (#6495)" (#6662)
+  * llm: make load time stall duration configurable via OLLAMA_LOAD_TIMEOUT
+  * Introduce GPU Overhead env var (#5922)
+  * Detect running in a container (#6495)
+  * readme: add AiLama to the list of community integrations (#4957)
+  * Update gpu.md: Add RTX 3050 Ti and RTX 3050 Ti (#5888)
+  * server: fix blob download when receiving a 200 response (#6656)
+  * readme: add Gentoo package manager entry to community integrations (#5714)
+  * Update install.sh: Replace "command -v" with encapsulated functionality (#6035)
+  * readme: include Enchanted for Apple Vision Pro (#4949)
+  * readme: add lsp-ai to community integrations (#5063)
+  * readme: add ollama-php library to community integrations (#6361)
+  * readme: add vnc-lm discord bot community integration (#6644)
+  * llm: use json.hpp from common (#6642)
+  * readme: add confichat to community integrations (#6378)
+  * docs: add group to manual Linux instructions and verify service is running (#6430)
+  * readme: add gollm to the list of community libraries (#6099)
+  * readme: add Cherry Studio to community integrations (#6633)
+  * readme: add Go fun package (#6421)
+  * docs: fix spelling error (#6391)
+  * install.sh: update instructions to use WSL2 (#6450)
+  * readme: add claude-dev to community integrations (#6630)
+  * readme: add PyOllaMx project (#6624)
+  * llm: update llama.cpp commit to 8962422 (#6618)
+  * Use cuda v11 for driver 525 and older (#6620)
+  * Log system memory at info (#6617)
+  * readme: add Painting Droid community integration (#5514)
+  * readme: update Ollama4j link and add link to Ollama4j Web UI (#6608)
+  * Fix sprintf to snprintf (#5664)
+  * readme: add PartCAD tool to readme for generating 3D CAD models using Ollama (#6605)
+  * Reduce docker image size (#5847)
+  * readme: add OllamaFarm project (#6508)
+  * readme: add go-crew and Ollamaclient projects (#6583)
+  * docs: update faq.md for OLLAMA_MODELS env var permissions (#6587)
+  * fix(cmd): show info may have nil ModelInfo (#6579)
+  * docs: update GGUF examples and references (#6577)
+  * Add findutils to base images (#6581)
+  * remove any unneeded build artifacts
+  * doc: Add Nix and Flox to package manager listing (#6074)
+  * update the openai docs to explain how to set the context size (#6548)
+  * fix(test): do not clobber models directory
+  * add llama3.1 chat template (#6545)
+  * update deprecated warnings
+  * validate model path
+  * throw an error when encountering unsupported tensor sizes (#6538)
+  * Move ollama executable out of bin dir (#6535)
+  * update templates to use messages
+  * more tokenizer tests
+  * add safetensors to the modelfile docs (#6532)
+  * Fix import image width (#6528)
+  * Update manual instructions with discrete ROCm bundle (#6445)
+  * llm: fix typo in comment (#6530)
+  * adjust image sizes
+  * clean up convert tokenizer
+  * detect chat template from configs that contain lists
+  * update the import docs (#6104)
+  * server: clean up route names for consistency (#6524)
+  * Only enable numa on CPUs (#6484)
+  * gpu: Group GPU Library sets by variant (#6483)
+  * update faq
+  * passthrough OLLAMA_HOST path to client
+  * convert safetensor adapters into GGUF (#6327)
+  * gpu: Ensure driver version set before variant (#6480)
+  * llm: Align cmake define for cuda no peer copy (#6455)
+  * Fix embeddings memory corruption (#6467)
+  * llama3.1
+  * convert gemma2
+  * create bert models from cli
+  * bert
+  * Split rocm back out of bundle (#6432)
+  * CI: remove directories from dist dir before upload step (#6429)
+  * CI: handle directories during checksum (#6427)
+  * Fix overlapping artifact name on CI
+  * Review comments
+  * Adjust layout to bin+lib/ollama
+  * Remove Jetpack
+  * Add windows cuda v12 + v11 support
+  * Enable cuda v12 flags
+  * Add cuda v12 variant and selection logic
+  * Report GPU variant in log
+  * Add Jetson cuda variants for arm
+  * Wire up ccache and pigz in the docker based build
+  * Refactor linux packaging
+  * server: limit upload parts to 16 (#6411)
+  * Fix white space.
+  * Reset NumCtx.
+  * Override numParallel only if unset.
+  * fix: chmod new layer to 0o644 when creating it
+  * fix: Add tooltip to system tray icon
+  * only skip invalid json manifests
+  * skip invalid manifest files
+  * fix noprune
+  * add `CONTRIBUTING.md` (#6349)
+  * Fix typo and improve readability (#5964)
+  * server: reduce max connections used in download (#6347)
+  * update chatml template format to latest in docs (#6344)
+  * lint
+  * Update openai.md to remove extra checkbox (#6345)
+  * llama3.1 memory
+
 -------------------------------------------------------------------
 Thu Aug 15 18:59:48 UTC 2024 - Eyad Issa <eyadlorenzo@gmail.com>
 

ollama.obsinfo
@@ -1,4 +1,4 @@
 name: ollama
-version: 0.3.6
-mtime: 1723575229
-commit: 4c4fe3f87fe1858b35bd0d41e093a0039ec4cee4
+version: 0.3.10
+mtime: 1725725288
+commit: 06d4fba851b91eb55da892d23834e8fe75096ca7

ollama.spec
@@ -17,7 +17,7 @@
 
 
 Name:           ollama
-Version:        0.3.6
+Version:        0.3.10
 Release:        0
 Summary:        Tool for running AI models on-premise
 License:        MIT
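
With Version bumped to 0.3.10, the spec picks up the service-generated archive automatically. A quick local sanity check before submitting could look like this (repository and architecture names are examples, not fixed by this commit):

    # Rebuild the package locally from the osc checkout:
    osc build openSUSE_Factory x86_64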

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:55ce911e80d49668a716385a09682e6a6ad44d711486ee38b32e8269a0a6a3df
-size 5354975
+oid sha256:4fc07ae239af7667ad257ce20adddb3d6271ef14d06ef632348d2fb6c83a49db
+size 5355011