Eyad Issa
808a0b582d
* api embed docs (#5282) * convert: capture `head_dim` for mistral (#5818) * Update llama.cpp submodule commit to `d94c6e0c` (#5805) * server: collect nested tool call objects when parsing (#5824) * Remove no longer supported max vram var * Refine error reporting for subprocess crash * Remove out of space test temporarily (#5825) * llm: consider `head_dim` in llama arch (#5817) * Adjust windows ROCm discovery * add patch for tekken (#5807) * preserve last assistant message (#5802) * Fix generate test flakyness (#5804) * server: validate template (#5734) * OpenAI: Function Based Testing (#5752) * adjust openai chat msg processing (#5729) * fix parsing tool calls * server: check for empty tools array too (#5779) * always provide content even if empty (#5778) * server: only parse tool calls if tools are provided (#5771) * Fix context exhaustion integration test for small gpus * Refine scheduler unit tests for reliability OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/ollama?expand=0&rev=37
24 lines
804 B
Plaintext
<services>
  <service name="format_spec_file" mode="manual" />
  <service name="obs_scm" mode="manual">
    <param name="url">https://github.com/ollama/ollama.git</param>
    <param name="scm">git</param>
    <param name="revision">v0.2.8</param>
    <param name="versionformat">@PARENT_TAG@</param>
    <param name="versionrewrite-pattern">v(.*)</param>
    <param name="changesgenerate">enable</param>
    <param name="submodules">enable</param>
    <param name="exclude">macapp</param>
    <param name="package-meta">yes</param>
  </service>
  <service name="go_modules" mode="manual">
    <param name="compression">zstd</param>
  </service>
  <service name="set_version" mode="manual" />
  <service name="tar" mode="buildtime">
    <param name="package-meta">yes</param>
  </service>
</services>