Accepting request 1181491 from science:machinelearning

OBS-URL: https://build.opensuse.org/request/show/1181491
OBS-URL: https://build.opensuse.org/package/show/openSUSE:Factory/ollama?expand=0&rev=9
Ana Guerrero 2024-06-18 20:52:16 +00:00 committed by Git OBS Bridge
commit 1abab79606
8 changed files with 58 additions and 11 deletions


@@ -3,7 +3,7 @@
 <service name="obs_scm" mode="manual">
 <param name="url">https://github.com/ollama/ollama.git</param>
 <param name="scm">git</param>
-<param name="revision">v0.1.40</param>
+<param name="revision">v0.1.44</param>
 <param name="versionformat">@PARENT_TAG@</param>
 <param name="versionrewrite-pattern">v(.*)</param>
 <param name="changesgenerate">enable</param>


@@ -1,4 +1,4 @@
 <servicedata>
 <service name="tar_scm">
 <param name="url">https://github.com/ollama/ollama.git</param>
-<param name="changesrevision">829ff87bd1a98eff727003d3b24748f0f7d8c3ac</param></service></servicedata>
+<param name="changesrevision">c39761c5525132d96e1da0956a9aa39e87b54114</param></service></servicedata>


@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:fb2c4c7d4180f84ee10eb4a000a86de0f3fa79302722b88b808320ab8f9d8351
-size 179825166

ollama-0.1.44.obscpio Normal file

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5558006700a3829e4aa3abac5b598bf836b109961b776b63a2bf536638e8e699
+size 155404814
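For reference, the two .obscpio entries above are Git LFS pointer files rather than the archives themselves; the oid field is the SHA-256 digest of the real archive content. A minimal sanity check, assuming the 0.1.44 archive has been fetched into the working copy:

    # the printed digest should match the oid recorded in the pointer above
    sha256sum ollama-0.1.44.obscpio
    # expected: 5558006700a3829e4aa3abac5b598bf836b109961b776b63a2bf536638e8e699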


@@ -1,3 +1,46 @@
+-------------------------------------------------------------------
+Tue Jun 18 12:12:41 UTC 2024 - Eyad Issa <eyadlorenzo@gmail.com>
+
+- Added documentation files to .spec
+- Update to version 0.1.44:
+  * Fixed issue where Unicode characters such as emojis would not
+    be loaded correctly when running ollama create
+  * Fixed certain cases where Nvidia GPUs would not be detected and
+    reported as compute capability 1.0 devices
+- Update to version 0.1.43:
+  * New import.md guide for converting and importing models to
+    Ollama
+  * Fixed issue where embedding vectors resulting from
+    /api/embeddings would not be accurate
+  * JSON mode responses will no longer include invalid escape
+    characters
+  * Removing a model will no longer show incorrect "File not found"
+    errors
+  * Fixed issue where running ollama create would result in an
+    error on Windows with certain file formatting
+- Update to version 0.1.42:
+  * New models: Qwen 2, a new series of large language models from
+    the Alibaba group
+  * ollama pull is now faster if it detects a model is already
+    downloaded
+  * ollama create will now automatically detect prompt templates
+    for popular model architectures such as Llama, Gemma, Phi and
+    more
+  * Ollama can now be accessed from local apps built with Electron
+    and Tauri, as well as from apps developed as local HTML files
+  * Updated the welcome prompt on Windows to llama3
+  * Fixed issues where /api/ps and /api/tags would show invalid
+    timestamps in responses
+- Update to version 0.1.41:
+  * Fixed issue on Windows 10 and 11 with Intel CPUs with
+    integrated GPUs where Ollama would encounter an error
+
 -------------------------------------------------------------------
 Sat Jun 01 21:12:20 UTC 2024 - Eyad Issa <eyadlorenzo@gmail.com>


@@ -1,4 +1,4 @@
 name: ollama
-version: 0.1.40
-mtime: 1717206861
-commit: 829ff87bd1a98eff727003d3b24748f0f7d8c3ac
+version: 0.1.44
+mtime: 1718310369
+commit: c39761c5525132d96e1da0956a9aa39e87b54114


@@ -17,7 +17,7 @@
 Name: ollama
-Version: 0.1.40
+Version: 0.1.44
 Release: 0
 Summary: Tool for running AI models on-premise
 License: MIT
@@ -75,6 +75,9 @@ install -D -m 0644 %{SOURCE2} %{buildroot}%{_unitdir}/%{name}.service
 install -D -m 0644 %{SOURCE3} %{buildroot}%{_sysusersdir}/%{name}-user.conf
 install -d %{buildroot}/var/lib/%{name}
+
+mkdir -p "%buildroot/%_docdir/%name"
+cp -Ra docs/* "%buildroot/%_docdir/%name"
 
 %pre -f %{name}.pre
 %service_add_pre %{name}.service
@@ -90,6 +93,7 @@ install -d %{buildroot}/var/lib/%{name}
 %files
 %doc README.md
 %license LICENSE
+%{_docdir}/%{name}
 %{_bindir}/%{name}
 %{_unitdir}/%{name}.service
 %{_sysusersdir}/%{name}-user.conf
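The new %install lines copy the upstream docs/ tree into %{_docdir}/%{name}, and the added %files entry packages that directory. One way to sanity-check the result is to build locally and list the documentation payload of the produced RPM; the repository, architecture and RPM path below are placeholders, and on openSUSE %{_docdir} normally resolves to /usr/share/doc/packages:

    # build the package locally, then inspect the files it would install
    osc build openSUSE_Factory x86_64
    rpm -qlp ollama-0.1.44-0.x86_64.rpm | grep '^/usr/share/doc/packages/ollama/'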


@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:783885d48a70583689a056c794d449d2d66cd80a13c03f016f8cc33262e444b1
-size 5323720
+oid sha256:c237ad965be69a774d3c48a34950391ff225737aa034aac0db88c04328121bbd
+size 5307629