forked from pool/python-gpt4all
Compare commits
8 Commits
| Author | SHA256 | Date |
|---|---|---|
| | 73fb41bcc5 | |
| | 0fe43cd6b8 | |
| | b7ebb98a8f | |
| | 26de200753 | |
| | 3ac38bcb80 | |
| | e78206fe04 | |
| | fc5521da14 | |
| | dcb1d59f64 | |
(deleted file, 3 lines)
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:2fef47fc74c8ccc32b33b8c83f9833b6a4c02e09da8d688abb6ee35167652ea9
-size 8686531
(deleted file, 3 lines)
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:0c1ee9121d00d989750416a1ad4f1cfb035946f5acfe5fb7259bb1fb8b62dc66
-size 13540622
fix-aarch64.patch (new file, 10 lines)
@@ -0,0 +1,10 @@
--- gpt4all-3.4.2/gpt4all-backend/llama.cpp.cmake.orig	2025-03-13 09:17:45.100111000 +0100
+++ gpt4all-3.4.2/gpt4all-backend/llama.cpp.cmake	2025-03-13 09:18:12.918919300 +0100
@@ -845,6 +845,7 @@ function(include_ggml SUFFIX)
         if (MSVC)
             # TODO: arm msvc?
         else()
+            include(CheckCXXCompilerFlag)
             check_cxx_compiler_flag(-mfp16-format=ieee COMPILER_SUPPORTS_FP16_FORMAT_I3E)
             if (NOT "${COMPILER_SUPPORTS_FP16_FORMAT_I3E}" STREQUAL "")
                 list(APPEND ARCH_FLAGS -mfp16-format=ieee)
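For context on the one-line fix above: `check_cxx_compiler_flag()` is defined by CMake's `CheckCXXCompilerFlag` module, which is not loaded by default, so reaching this branch without the `include()` aborts configuration with an unknown-command error. A minimal standalone sketch of the same probe (the project name and the `message()` call are illustrative, not from the patch):

```cmake
cmake_minimum_required(VERSION 3.16)
project(fp16_probe CXX)  # illustrative project, not part of the patch

# check_cxx_compiler_flag() comes from this module; without the include,
# CMake stops with "Unknown CMake command 'check_cxx_compiler_flag'".
include(CheckCXXCompilerFlag)

# Try to compile a test program with the ARM-specific flag. The result is
# cached in the named variable: non-empty when the compiler accepts it.
check_cxx_compiler_flag(-mfp16-format=ieee COMPILER_SUPPORTS_FP16_FORMAT_I3E)

if(NOT "${COMPILER_SUPPORTS_FP16_FORMAT_I3E}" STREQUAL "")
    list(APPEND ARCH_FLAGS -mfp16-format=ieee)
endif()

message(STATUS "ARCH_FLAGS: ${ARCH_FLAGS}")
```

Presumably only the ARM code path reaches this call without the module already loaded elsewhere, which is why the failure surfaced on aarch64 (gh#nomic-ai/gpt4all#3536).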
kompute-7c20efa.tar.gz (new file, 3 lines)
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf51f45eaabd9b1fccc2c871ccdd7be81730bb0172538c9375c1e33bf5ac131c
+size 13540770
llama.cpp-58a55ef.tar.gz (new file, 3 lines)
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:45de15332c3946b794bc1a625edb641be675bf4499b3ee951557fb231d1f9062
+size 19144826
(deleted file, 3 lines)
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:4c7b072d93a4ba7692e65e3551ab5cb9593b29865869fd4aae26e3eecf0d24ff
-size 7039354
python-gpt4all-v3.4.2.tar.gz (new file, 3 lines)
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5866769e6bb10718e9a70df9522a6a9120598791edeade207a3098186502fd12
+size 34288844
@@ -1,7 +1,30 @@
+-------------------------------------------------------------------
+Thu Mar 13 08:35:08 UTC 2025 - Guillaume GARDET <guillaume.gardet@opensuse.org>
+
+- Add patch to fix build on aarch64:
+  * fix-aarch64.patch
+
+-------------------------------------------------------------------
+Mon Feb 24 14:08:15 UTC 2025 - Daniel Garcia <daniel.garcia@suse.com>
+
+- Add vk301.patch to fix build with latest vulkan
+
+-------------------------------------------------------------------
+Tue Dec 17 14:34:18 UTC 2024 - Christian Goll <cgoll@suse.com>
+
+- updated to 3.4.2 (3.5.3 has issues https://github.com/nomic-ai/gpt4all/issues/3310)
+- removed gpt4all chat as it's relying on jinja2cpp which needs
+  some packaging effort
+
+-------------------------------------------------------------------
+Tue Nov 19 15:16:53 UTC 2024 - Dirk Müller <dmueller@suse.com>
+
+- fix runtime requires
+
 -------------------------------------------------------------------
 Thu May 23 17:56:11 UTC 2024 - Christian Goll <cgoll@suse.com>
 
 - added gpt4all-chat what is a QT6-GUI and updated to latest
   llamacpp a3f03b7
 - rename gpt4all.rpmlintrc to python-gpt4all.rpmlintrc
 
@@ -1,7 +1,7 @@
 #
 # spec file for package python-gpt4all
 #
-# Copyright (c) 2024 SUSE LLC
+# Copyright (c) 2025 SUSE LLC
 #
 # All modifications and additions to the file contributed by third parties
 # remain the property of their copyright owners, unless otherwise agreed
@@ -16,13 +16,12 @@
 #
 
-
-%define llamavers a3f03b7
-%define komputevers c339310
+%define llamavers 58a55ef
+%define komputevers 7c20efa
 
 %{?sle15_python_module_pythons}
 
 Name: python-gpt4all
-Version: 2.7.3
+Version: 3.4.2
 Release: 0
 Summary: open source llms for all
 License: Apache-2.0 AND MIT
@@ -30,10 +29,14 @@ URL: https://github.com/nomic-ai/gpt4all
 #MIT
 Source0: https://github.com/nomic-ai/gpt4all/archive/refs/tags/v%{version}.tar.gz#/%{name}-v%{version}.tar.gz
 #MIT
-Source1: https://github.com/nomic-ai/llama.cpp/archive/%{llamavers}.tar.gz
+Source1: https://github.com/nomic-ai/llama.cpp/archive/%{llamavers}.tar.gz#/llama.cpp-%{llamavers}.tar.gz
 # Apache-2.0
-Source2: https://github.com/nomic-ai/kompute/archive/c339310.tar.gz
+Source2: https://github.com/nomic-ai/kompute/archive/%{komputevers}.tar.gz#/kompute-%{komputevers}.tar.gz
 Source3: %{name}.rpmlintrc
+# PATCH-FIX-OPENSUSE vk301.patch gh#KhronosGroup/Vulkan-Samples#1269
+Patch1: vk301.patch
+# PATCH-FIX-UPSTREAM - https://github.com/nomic-ai/gpt4all/issues/3536
+Patch2: fix-aarch64.patch
 BuildRequires: %{python_module setuptools}
 BuildRequires: cmake
 BuildRequires: fdupes
@@ -44,22 +47,26 @@ BuildRequires: gcc-c++
 %endif
 BuildRequires: fmt-devel
 BuildRequires: python-rpm-macros
+BuildRequires: qt6-base-common-devel
 BuildRequires: qt6-httpserver-devel
+BuildRequires: qt6-linguist-devel
 BuildRequires: qt6-pdf-devel
 BuildRequires: qt6-quickdialogs2-devel
 BuildRequires: qt6-sql-devel
 BuildRequires: qt6-svg-devel
 BuildRequires: qt6-wayland-devel
+BuildRequires: rapidjson-devel
 BuildRequires: shaderc
 BuildRequires: shaderc-devel
 BuildRequires: update-desktop-files
 BuildRequires: vulkan-devel
 BuildRequires: vulkan-utility-libraries-devel
-Requires: %{python_module importlib-metadata}
-Requires: %{python_module requests}
-Requires: %{python_module tqdm}
-Requires: %{python_module typer}
-Requires: %{python_module typing_extensions}
+Requires: python-importlib-metadata
+Requires: python-jinja2
+Requires: python-requests
+Requires: python-tqdm
+Requires: python-typer
+Requires: python-typing_extensions
 %python_subpackages
 
 %description
@@ -83,14 +90,19 @@ Libnrairy for aessing the models
 
 %prep
 %setup -n gpt4all-%{version}
-cd gpt4all-backend
+pushd gpt4all-backend/deps/
 rmdir llama.cpp-mainline
 tar xzf %{S:1}
 mv llama.cpp-%{llamavers}* llama.cpp-mainline
-cd llama.cpp-mainline
+pushd llama.cpp-mainline/ggml/src
 rmdir kompute
 tar xzf %{S:2}
 mv kompute-%{komputevers}* kompute
+popd
+popd
+
+%patch -P1 -p1
+%patch -P2 -p1
 
 %build
 %if 0%{?sle_version} == 150600
@@ -100,16 +112,7 @@ export CC=gcc-12
 cd gpt4all-backend
 %cmake -DLLAMA_KOMPUTE=ON \
        -DLLMODEL_CUDA=OFF \
-       -DLLMODEL_VULKAN=ON \
-       -DKOMPUTE_OPT_USE_BUILT_IN_VULKAN_HEADER=OFF \
-       -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON \
-       -DKOMPUTE_OPT_USE_BUILT_IN_FMT=OFF \
-       -DCMAKE_BUILD_TYPE=RelWithDebInfo
-%cmake_build
-cd ../../gpt4all-chat
-%cmake -DLLAMA_KOMPUTE=ON \
-       -DLLMODEL_CUDA=OFF \
-       -DLLMODEL_VULKAN=ON \
+       -DLLMODEL_VULKAN=OFF \
        -DKOMPUTE_OPT_USE_BUILT_IN_VULKAN_HEADER=OFF \
        -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON \
        -DKOMPUTE_OPT_USE_BUILT_IN_FMT=OFF \
@@ -122,40 +125,8 @@ cd ../../gpt4all-bindings/python
 %install
 cd gpt4all-bindings/python
 %python_install
-%python_expand %fdupes %{buildroot}%{$python_sitearch}
-install -D -m 0755 ../cli/app.py %{buildroot}/%{_bindir}/gpt4all-app
-%{python_expand # fix shebang
-sed -i 's|%{_bindir}/env python.*$|%{_bindir}/$python|' %{buildroot}/%{_bindir}/gpt4all-app
-}
-%python_clone -a %{buildroot}/%{_bindir}/gpt4all-app
-cd ../../gpt4all-chat
-%cmake_install
-
-%suse_update_desktop_file -c gpt4all-chat chat "Open-source assistant-style large language models that run locally on your CPU" gpt4all-chat gpt4all-chat.svg
-
-mv %{buildroot}%{_bindir}/chat %{buildroot}%{_bindir}/gpt4all-chat
-rm -v %{buildroot}%{_prefix}/lib/*.a
-mkdir -p %{buildroot}%{_libdir}
-mv -v %{buildroot}%{_prefix}/lib/libllmodel.so* %{buildroot}%{_libdir}
-
-%post
-%python_install_alternative gpt4all-app
-
-%postun
-%python_uninstall_alternative gpt4all-app
 
 %files %{python_files}
 %{python_sitelib}/*
-%python_alternative %{_bindir}/gpt4all-app
-
-%files -n gpt4all-chat
-%{_bindir}/gpt4all-chat
-%{_prefix}/lib/libgptj*
-%{_prefix}/lib/libllamamodel*
-%{_prefix}/lib/libbert*
-%{_datadir}/applications/gpt4all-chat.desktop
-
-%files -n libllmodel0
-%{_libdir}/libllmodel.so*
 
 %changelog
vk301.patch (new file, 46 lines)
@@ -0,0 +1,46 @@
Index: gpt4all-3.4.2/gpt4all-backend/deps/llama.cpp-mainline/ggml/src/kompute/src/include/kompute/Manager.hpp
===================================================================
--- gpt4all-3.4.2.orig/gpt4all-backend/deps/llama.cpp-mainline/ggml/src/kompute/src/include/kompute/Manager.hpp
+++ gpt4all-3.4.2/gpt4all-backend/deps/llama.cpp-mainline/ggml/src/kompute/src/include/kompute/Manager.hpp
@@ -255,7 +255,11 @@ class Manager
     bool mFreeInstance = false;
     std::shared_ptr<vk::PhysicalDevice> mPhysicalDevice = nullptr;
     std::shared_ptr<vk::Device> mDevice = nullptr;
+#if VK_HEADER_VERSION >= 301
+    std::shared_ptr<vk::detail::DynamicLoader> mDynamicLoader = nullptr;
+#else
     std::shared_ptr<vk::DynamicLoader> mDynamicLoader = nullptr;
+#endif
     bool mFreeDevice = false;
 
     // -------------- ALWAYS OWNED RESOURCES
@@ -271,8 +275,12 @@ class Manager
 
 #ifndef KOMPUTE_DISABLE_VK_DEBUG_LAYERS
     vk::DebugReportCallbackEXT mDebugReportCallback;
+#if VK_HEADER_VERSION >= 301
+    vk::detail::DispatchLoaderDynamic mDebugDispatcher;
+#else
     vk::DispatchLoaderDynamic mDebugDispatcher;
 #endif
+#endif
 
     // Create functions
     void createInstance();
Index: gpt4all-3.4.2/gpt4all-backend/deps/llama.cpp-mainline/ggml/src/kompute/src/Manager.cpp
===================================================================
--- gpt4all-3.4.2.orig/gpt4all-backend/deps/llama.cpp-mainline/ggml/src/kompute/src/Manager.cpp
+++ gpt4all-3.4.2/gpt4all-backend/deps/llama.cpp-mainline/ggml/src/kompute/src/Manager.cpp
@@ -181,7 +181,12 @@ Manager::createInstance()
     }
 
     try {
+#if VK_HEADER_VERSION >= 301
+        mDynamicLoader = std::make_shared<vk::detail::DynamicLoader>();
+#else
         mDynamicLoader = std::make_shared<vk::DynamicLoader>();
+#endif
+
     } catch (const std::exception & err) {
         return;
     }
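For context: Vulkan-Hpp moved `DynamicLoader` and `DispatchLoaderDynamic` into the `vk::detail` namespace as of header version 301, which is why the vendored kompute stopped building against a current SDK. A minimal sketch of the same compile-time switch, assuming only that the Vulkan-Hpp header is installed (the alias names are illustrative, not from the patch):

```cpp
#include <memory>
#include <vulkan/vulkan.hpp>

// Vulkan-Hpp >= 301 spells these helpers vk::detail::*, older SDKs vk::*.
// Selecting an alias on VK_HEADER_VERSION keeps one code path for both.
#if VK_HEADER_VERSION >= 301
using DynamicLoaderT = vk::detail::DynamicLoader;
using DispatchLoaderT = vk::detail::DispatchLoaderDynamic;
#else
using DynamicLoaderT = vk::DynamicLoader;
using DispatchLoaderT = vk::DispatchLoaderDynamic;
#endif

int main() {
    try {
        // Mirrors what kompute's Manager::createInstance() does: load the
        // Vulkan loader library and resolve the bootstrap entry point.
        auto loader = std::make_shared<DynamicLoaderT>();
        auto getInstanceProcAddr =
            loader->getProcAddress<PFN_vkGetInstanceProcAddr>("vkGetInstanceProcAddr");
        return getInstanceProcAddr != nullptr ? 0 : 1;
    } catch (const std::exception &) {
        return 1; // no usable Vulkan loader on this system
    }
}
```

The actual vk301.patch applies the switch in place rather than via aliases, duplicating each declaration under `#if VK_HEADER_VERSION >= 301`.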