- openvino-onnx-ml-defines.patch and
  openvino-remove-npu-compile-tool.patch have been removed
  as they are no longer needed in this version.
- Update to 2024.6.0
- Summary of major features and improvements  
  * OpenVINO 2024.6 release includes updates for enhanced 
    stability and improved LLM performance.
  * Introduced support for Intel® Arc™ B-Series Graphics 
    (formerly known as Battlemage).
  * Implemented optimizations to improve the inference time and 
    LLM performance on NPUs.
  * Improved LLM performance with GenAI API optimizations and 
    bug fixes.
- Support Change and Deprecation Notices
  * Using deprecated features and components is not advised. They
    are available to enable a smooth transition to new solutions 
    and will be discontinued in the future. To keep using 
    discontinued features, you will have to revert to the last 
    LTS OpenVINO version supporting them. For more details, refer
    to the OpenVINO Legacy Features and Components page.
  * Discontinued in 2024.0:
    + Runtime components:
      - Intel® Gaussian & Neural Accelerator (Intel® GNA).
        Consider using the Neural Processing Unit (NPU) for 
        low-powered systems like Intel® Core™ Ultra or 14th 
        generation and beyond.
      - OpenVINO C++/C/Python 1.0 APIs (see 2023.3 API transition
        guide for reference).
      - All ONNX Frontend legacy API (known as ONNX_IMPORTER_API)
      - 'PerformanceMode.UNDEFINED' property as part of the
        OpenVINO Python API
    + Tools:
      - Deployment Manager. See installation and deployment 
        guides for current distribution options.
      - Accuracy Checker.
      - Post-Training Optimization Tool (POT). Neural Network
        Compression Framework (NNCF) should be used instead.
      - A Git patch for NNCF integration with 
        huggingface/transformers. The recommended approach is
        to use huggingface/optimum-intel for applying NNCF
        optimization on top of models from Hugging Face.
      - Support for Apache MXNet, Caffe, and Kaldi model formats.
        Conversion to ONNX may be used as a solution (see the
        Python sketch after these notices).
  * Deprecated and to be removed in the future:
    + The macOS x86_64 debug bins will no longer be provided 
      with the OpenVINO toolkit, starting with OpenVINO 2024.5.
    + Python 3.8 is no longer supported, starting with 
      OpenVINO 2024.5.
    + As MXNet doesn't support Python versions higher than 3.8,
      according to the MXNet PyPI project, it is no longer
      supported by OpenVINO, either.
    + Discrete Keem Bay devices are no longer supported, starting
      with OpenVINO 2024.5.
    + Support for discrete devices (formerly codenamed Raptor 
      Lake) is no longer available for NPU.
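
  A short Python sketch of the two migration paths named above
  (NNCF weight compression via huggingface/optimum-intel, and
  ONNX as the interchange format for the dropped MXNet, Caffe,
  and Kaldi frontends). This is an illustrative sketch only, not
  part of the package; the model id and file names are
  placeholders:

    # NNCF optimization via huggingface/optimum-intel (replaces
    # the discontinued Git patch for huggingface/transformers).
    from optimum.intel import OVModelForCausalLM

    # export=True converts the Hugging Face model to OpenVINO IR;
    # load_in_8bit=True applies NNCF 8-bit weight compression.
    model = OVModelForCausalLM.from_pretrained(
        "gpt2",  # placeholder model id
        export=True,
        load_in_8bit=True,
    )

    # ONNX as the migration path for dropped frontends: convert
    # an ONNX export to OpenVINO IR and save it.
    import openvino as ov

    ov_model = ov.convert_model("model.onnx")  # placeholder path
    ov.save_model(ov_model, "model.xml")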

OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/openvino?expand=0&rev=23
commit 6f04946ce8
Author: Guillaume GARDET
Date: 2025-01-07 17:14:03 +00:00 (committed by Git OBS Bridge)

16 changed files with 1165 additions and 0 deletions

.gitattributes (new file)

@@ -0,0 +1,23 @@
## Default LFS
*.7z filter=lfs diff=lfs merge=lfs -text
*.bsp filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.gem filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.jar filter=lfs diff=lfs merge=lfs -text
*.lz filter=lfs diff=lfs merge=lfs -text
*.lzma filter=lfs diff=lfs merge=lfs -text
*.obscpio filter=lfs diff=lfs merge=lfs -text
*.oxt filter=lfs diff=lfs merge=lfs -text
*.pdf filter=lfs diff=lfs merge=lfs -text
*.png filter=lfs diff=lfs merge=lfs -text
*.rpm filter=lfs diff=lfs merge=lfs -text
*.tbz filter=lfs diff=lfs merge=lfs -text
*.tbz2 filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.ttf filter=lfs diff=lfs merge=lfs -text
*.txz filter=lfs diff=lfs merge=lfs -text
*.whl filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text

.gitignore (new file)

@@ -0,0 +1 @@
.osc

_constraints (new file)

@@ -0,0 +1,11 @@
<?xml version="1.0" encoding="UTF-8"?>
<constraints>
  <hardware>
    <disk>
      <size unit="G">20</size>
    </disk>
    <memory>
      <size unit="G">8</size>
    </memory>
  </hardware>
</constraints>

_service (new file)

@@ -0,0 +1,16 @@
<services>
  <service name="obs_scm" mode="manual">
    <param name="url">https://github.com/openvinotoolkit/openvino.git</param>
    <param name="scm">git</param>
    <param name="revision">2024.6.0</param>
    <param name="version">2024.6.0</param>
    <param name="submodules">enable</param>
    <param name="filename">openvino</param>
    <param name="exclude">.git</param>
  </service>
  <service name="tar" mode="buildtime" />
  <service name="recompress" mode="buildtime">
    <param name="file">*.tar</param>
    <param name="compression">zstd</param>
  </service>
</services>

Source archive (Git LFS pointer, new file)

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:50b3efee39ea06430456d49db9b4173b22999d8b3e7547dc658bb37df82b0f1b
size 1036420623

Source archive (Git LFS pointer, new file)

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fde6d7a29c8284b72866b02b37f6eaff9143f4a3b05f48a098d4965cc53c9248
size 1102958095

Source archive (Git LFS pointer, new file)

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bf888744a675256fd9afab53a2e9ba738ccd08d1c5153bbbe3ec26385ff9111e
size 1150310927

openvino-ComputeLibrary-include-string.patch (new file)

@@ -0,0 +1,12 @@
Index: openvino-2024.0.0/src/plugins/intel_cpu/thirdparty/ComputeLibrary/arm_compute/core/utils/logging/IPrinter.h
===================================================================
--- openvino-2024.0.0.orig/src/plugins/intel_cpu/thirdparty/ComputeLibrary/arm_compute/core/utils/logging/IPrinter.h
+++ openvino-2024.0.0/src/plugins/intel_cpu/thirdparty/ComputeLibrary/arm_compute/core/utils/logging/IPrinter.h
@@ -25,6 +25,7 @@
#define ARM_COMPUTE_LOGGING_PRINTER_H
#include "support/Mutex.h"
+#include <string>
namespace arm_compute
{

openvino-fix-build-sample-path.patch (new file)

@@ -0,0 +1,12 @@
diff -uNr openvino.orig/samples/cpp/build_samples.sh openvino/samples/cpp/build_samples.sh
--- openvino.orig/samples/cpp/build_samples.sh 2024-04-25 01:04:42.451868881 -0300
+++ openvino/samples/cpp/build_samples.sh 2024-04-25 01:05:04.678342617 -0300
@@ -59,7 +59,7 @@
printf "\nSetting environment variables for building samples...\n"
if [ -z "$INTEL_OPENVINO_DIR" ]; then
- if [[ "$SAMPLES_SOURCE_DIR" = "/usr/share/openvino"* ]]; then
+ if [[ "$SAMPLES_SOURCE_DIR" = "/usr/share/OpenVINO"* ]]; then
true
elif [ -e "$SAMPLES_SOURCE_DIR/../../setupvars.sh" ]; then
setupvars_path="$SAMPLES_SOURCE_DIR/../../setupvars.sh"

openvino-fix-install-paths.patch (new file)

@@ -0,0 +1,87 @@
diff -uNr openvino-2024.6.0.orig/cmake/developer_package/packaging/archive.cmake openvino-2024.6.0/cmake/developer_package/packaging/archive.cmake
--- openvino-2024.6.0.orig/cmake/developer_package/packaging/archive.cmake 2024-12-27 17:04:54.520685198 -0300
+++ openvino-2024.6.0/cmake/developer_package/packaging/archive.cmake 2024-12-27 17:02:57.644273948 -0300
@@ -25,14 +25,18 @@
macro(ov_archive_cpack_set_dirs)
# common "archive" package locations
# TODO: move current variables to OpenVINO specific locations
- set(OV_CPACK_INCLUDEDIR runtime/include)
- set(OV_CPACK_OPENVINO_CMAKEDIR runtime/cmake)
- set(OV_CPACK_DOCDIR docs)
- set(OV_CPACK_LICENSESDIR licenses)
- set(OV_CPACK_SAMPLESDIR samples)
- set(OV_CPACK_WHEELSDIR wheels)
- set(OV_CPACK_DEVREQDIR tools)
- set(OV_CPACK_PYTHONDIR python)
+ set(OV_CPACK_INCLUDEDIR include)
+ set(OV_CPACK_OPENVINO_CMAKEDIR ${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME})
+ set(OV_CPACK_DOCDIR ${CMAKE_INSTALL_DOCDIR})
+ set(OV_CPACK_LICENSESDIR ${CMAKE_INSTALL_DATAROOTDIR}/licenses/${PROJECT_NAME})
+ set(OV_CPACK_SAMPLESDIR ${CMAKE_INSTALL_DATAROOTDIR}/${PROJECT_NAME}/samples)
+ if (ENABLE_PYTHON)
+ find_package(Python3 QUIET COMPONENTS Interpreter)
+ file(RELATIVE_PATH OV_PYTHON_MODPATH ${CMAKE_INSTALL_PREFIX} ${Python3_SITEARCH})
+ set(OV_CPACK_WHEELSDIR tools)
+ set(OV_CPACK_DEVREQDIR tools)
+ set(OV_CPACK_PYTHONDIR ${OV_PYTHON_MODPATH})
+ endif()
if(USE_BUILD_TYPE_SUBFOLDER)
set(build_type ${CMAKE_BUILD_TYPE})
@@ -49,11 +53,12 @@
set(OV_CPACK_RUNTIMEDIR runtime/lib/${ARCH_FOLDER}/${build_type})
set(OV_CPACK_ARCHIVEDIR runtime/lib/${ARCH_FOLDER}/${build_type})
else()
- set(OV_CPACK_LIBRARYDIR runtime/lib/${ARCH_FOLDER})
- set(OV_CPACK_RUNTIMEDIR runtime/lib/${ARCH_FOLDER})
- set(OV_CPACK_ARCHIVEDIR runtime/lib/${ARCH_FOLDER})
+ set(OV_CPACK_LIBRARYDIR ${CMAKE_INSTALL_LIBDIR})
+ set(OV_CPACK_RUNTIMEDIR ${CMAKE_INSTALL_LIBDIR})
+ set(OV_CPACK_ARCHIVEDIR ${CMAKE_INSTALL_LIBDIR})
endif()
- set(OV_CPACK_PLUGINSDIR ${OV_CPACK_RUNTIMEDIR})
+ set(OV_CPACK_PLUGINSDIR ${OV_CPACK_RUNTIMEDIR}/${PROJECT_NAME})
+
endmacro()
ov_archive_cpack_set_dirs()
diff -uNr openvino-2024.6.0.orig/src/cmake/openvino.cmake openvino-2024.6.0/src/cmake/openvino.cmake
--- openvino-2024.6.0.orig/src/cmake/openvino.cmake 2024-12-27 17:04:55.240687724 -0300
+++ openvino-2024.6.0/src/cmake/openvino.cmake 2024-12-27 17:03:50.176459053 -0300
@@ -267,6 +267,7 @@
# define relative paths
file(RELATIVE_PATH PKGCONFIG_OpenVINO_PREFIX "/${OV_CPACK_RUNTIMEDIR}/pkgconfig" "/")
+ cmake_path(NORMAL_PATH PKGCONFIG_OpenVINO_PREFIX)
set(pkgconfig_in "${OpenVINO_SOURCE_DIR}/cmake/templates/openvino.pc.in")
if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.20 AND OV_GENERATOR_MULTI_CONFIG)
diff -uNr openvino-2024.6.0.orig/src/plugins/intel_npu/tools/compile_tool/cmake/standalone.cmake openvino-2024.6.0/src/plugins/intel_npu/tools/compile_tool/cmake/standalone.cmake
--- openvino-2024.6.0.orig/src/plugins/intel_npu/tools/compile_tool/cmake/standalone.cmake 2024-12-27 17:04:56.868693438 -0300
+++ openvino-2024.6.0/src/plugins/intel_npu/tools/compile_tool/cmake/standalone.cmake 2024-12-28 00:55:18.661614722 -0300
@@ -43,5 +43,5 @@
endif()
install(TARGETS ${TARGET_NAME}
- DESTINATION "tools/${TARGET_NAME}"
+ DESTINATION "${CMAKE_INSTALL_DATAROOTDIR}/${PROJECT_NAME}/tools/${TARGET_NAME}"
COMPONENT npu_tools)
diff -uNr openvino-2024.6.0.orig/src/plugins/intel_npu/tools/compile_tool/CMakeLists.txt openvino-2024.6.0/src/plugins/intel_npu/tools/compile_tool/CMakeLists.txt
--- openvino-2024.6.0.orig/src/plugins/intel_npu/tools/compile_tool/CMakeLists.txt 2024-12-27 17:04:56.868693438 -0300
+++ openvino-2024.6.0/src/plugins/intel_npu/tools/compile_tool/CMakeLists.txt 2024-12-28 02:18:52.768816190 -0300
@@ -41,13 +41,13 @@
#
install(TARGETS ${TARGET_NAME}
- RUNTIME DESTINATION "tools/${TARGET_NAME}"
+ RUNTIME DESTINATION "${CMAKE_INSTALL_DATAROOTDIR}/${PROJECT_NAME}/tools/${TARGET_NAME}"
COMPONENT ${NPU_INTERNAL_COMPONENT}
${OV_CPACK_COMP_NPU_INTERNAL_EXCLUDE_ALL})
if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/README.md")
install(FILES "${CMAKE_CURRENT_SOURCE_DIR}/README.md"
- DESTINATION "tools/${TARGET_NAME}"
+ DESTINATION "${CMAKE_INSTALL_DATAROOTDIR}/${PROJECT_NAME}/tools/${TARGET_NAME}"
COMPONENT ${NPU_INTERNAL_COMPONENT}
${OV_CPACK_COMP_NPU_INTERNAL_EXCLUDE_ALL})
endif()

openvino-onnx-ml-defines.patch (new file)

@@ -0,0 +1,12 @@
Index: openvino-2024.0.0/thirdparty/dependencies.cmake
===================================================================
--- openvino-2024.0.0.orig/thirdparty/dependencies.cmake
+++ openvino-2024.0.0/thirdparty/dependencies.cmake
@@ -482,6 +482,7 @@ if(ENABLE_OV_ONNX_FRONTEND)
if(ONNX_FOUND)
# conan and vcpkg create imported targets 'onnx' and 'onnx_proto'
+ add_compile_definitions(ONNX_ML=1)
else()
add_subdirectory(thirdparty/onnx)
endif()

openvino-remove-npu-compile-tool.patch (new file)

@@ -0,0 +1,28 @@
diff -uNr openvino.orig/src/plugins/intel_npu/tools/CMakeLists.txt openvino/src/plugins/intel_npu/tools/CMakeLists.txt
--- openvino.orig/src/plugins/intel_npu/tools/CMakeLists.txt 2024-08-02 23:32:03.216982353 -0300
+++ openvino/src/plugins/intel_npu/tools/CMakeLists.txt 2024-08-04 17:22:22.899469769 -0300
@@ -4,5 +4,4 @@
#
add_subdirectory(common)
-add_subdirectory(compile_tool)
add_subdirectory(single-image-test)
diff -uNr openvino.orig/src/plugins/intel_npu/tools/compile_tool/CMakeLists.txt openvino/src/plugins/intel_npu/tools/compile_tool/CMakeLists.txt
--- openvino.orig/src/plugins/intel_npu/tools/compile_tool/CMakeLists.txt 2024-08-02 23:32:03.216982353 -0300
+++ openvino/src/plugins/intel_npu/tools/compile_tool/CMakeLists.txt 2024-08-03 02:36:25.059440300 -0300
@@ -44,13 +44,13 @@
#
install(TARGETS ${TARGET_NAME}
- RUNTIME DESTINATION "tools/${TARGET_NAME}"
+ RUNTIME DESTINATION "share/OpenVINO/tools/${TARGET_NAME}"
COMPONENT ${NPU_INTERNAL_COMPONENT}
${OV_CPACK_COMP_NPU_INTERNAL_EXCLUDE_ALL})
if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/README.md")
install(FILES "${CMAKE_CURRENT_SOURCE_DIR}/README.md"
- DESTINATION "tools/${TARGET_NAME}"
+ DESTINATION "share/OpenVINO/tools/${TARGET_NAME}"
COMPONENT ${NPU_INTERNAL_COMPONENT}
${OV_CPACK_COMP_NPU_INTERNAL_EXCLUDE_ALL})
endif()

openvino-rpmlintrc (new file)

@@ -0,0 +1,4 @@
addFilter("openvino-sample.*: E: devel-file-in-non-devel-package")
# These files are part of samples, meant for the user to copy and re-use, so env based hashbangs are preferred
addFilter("openvino-sample.*: E: env-script-interpreter")

openvino.changes (new file)

@@ -0,0 +1,510 @@
-------------------------------------------------------------------
Sun Dec 29 03:41:47 UTC 2024 - Alessandro de Oliveira Faria <cabelo@opensuse.org>
- openvino-onnx-ml-defines.patch and
openvino-remove-npu-compile-tool.patch have been removed
as they are no longer needed in this version.
- Update to 2024.6.0
- Summary of major features and improvements
* OpenVINO 2024.6 release includes updates for enhanced
stability and improved LLM performance.
* Introduced support for Intel® Arc™ B-Series Graphics
(formerly known as Battlemage).
* Implemented optimizations to improve the inference time and
LLM performance on NPUs.
* Improved LLM performance with GenAI API optimizations and
bug fixes.
- Support Change and Deprecation Notices
* Using deprecated features and components is not advised. They
are available to enable a smooth transition to new solutions
and will be discontinued in the future. To keep using
discontinued features, you will have to revert to the last
LTS OpenVINO version supporting them. For more details, refer
to the OpenVINO Legacy Features and Components page.
* Discontinued in 2024.0:
+ Runtime components:
- Intel® Gaussian & Neural Accelerator (Intel® GNA).
Consider using the Neural Processing Unit (NPU) for
low-powered systems like Intel® Core™ Ultra or 14th
generation and beyond.
- OpenVINO C++/C/Python 1.0 APIs (see 2023.3 API transition
guide for reference).
- All ONNX Frontend legacy API (known as ONNX_IMPORTER_API)
- 'PerformanceMode.UNDEFINED' property as part of the
OpenVINO Python API
+ Tools:
- Deployment Manager. See installation and deployment
guides for current distribution options.
- Accuracy Checker.
- Post-Training Optimization Tool (POT). Neural Network
Compression Framework (NNCF) should be used instead.
- A Git patch for NNCF integration with
huggingface/transformers. The recommended approach is
to use huggingface/optimum-intel for applying NNCF
optimization on top of models from Hugging Face.
- Support for Apache MXNet, Caffe, and Kaldi model formats.
Conversion to ONNX may be used as a solution.
* Deprecated and to be removed in the future:
+ The macOS x86_64 debug bins will no longer be provided
with the OpenVINO toolkit, starting with OpenVINO 2024.5.
+ Python 3.8 is no longer supported, starting with
OpenVINO 2024.5.
+ As MXNet doesn't support Python versions higher than 3.8,
according to the MxNet PyPI project, it is no longer
supported by OpenVINO, either.
+ Discrete Keem Bay devices are no longer supported, starting
with OpenVINO 2024.5.
+ Support for discrete devices (formerly codenamed Raptor
Lake) is no longer available for NPU.
-------------------------------------------------------------------
Tue Dec 10 15:50:41 UTC 2024 - Giacomo Comes <gcomes.obs@gmail.com>
- fix build on tumbleweed
* currently openvino does not support protobuf v22 or newer
-------------------------------------------------------------------
Tue Oct 15 00:56:54 UTC 2024 - Alessandro de Oliveira Faria <cabelo@opensuse.org>
- Temporarily inserted gcc-13 in Tumbleweed/Factory/Slowroll:
Because there is an incompatibility of the source code of the
level-zero library and npu module with gcc-14. I am working
with Intel on tests to return to native gcc.
- Update to 2024.4.0
- Summary of major features and improvements
* More Gen AI coverage and framework integrations to minimize
code changes
+ Support for GLM-4-9B Chat, MiniCPM-1B, Llama 3 and 3.1,
Phi-3-Mini, Phi-3-Medium and YOLOX-s models.
+ Noteworthy notebooks added: Florence-2, NuExtract-tiny
Structure Extraction, Flux.1 Image Generation, PixArt-α:
Photorealistic Text-to-Image Synthesis, and Phi-3-Vision
Visual Language Assistant.
* Broader Large Language Model (LLM) support and more model
compression techniques.
+ OpenVINO™ runtime optimized for Intel® Xe Matrix Extensions
(Intel® XMX) systolic arrays on built-in GPUs for efficient
matrix multiplication resulting in significant LLM
performance boost with improved 1st and 2nd token
latency, as well as a smaller memory footprint on
Intel® Core™ Ultra Processors (Series 2).
+ Memory sharing enabled for NPUs on Intel® Core™ Ultra
Processors (Series 2) for efficient pipeline integration
without memory copy overhead.
+ Addition of the PagedAttention feature for discrete GPUs*
enables a significant boost in throughput for parallel
inferencing when serving LLMs on Intel® Arc™ Graphics
or Intel® Data Center GPU Flex Series.
* More portability and performance to run AI at the edge,
in the cloud, or locally.
+ OpenVINO™ Model Server now comes with production-quality
support for OpenAI-compatible API which enables
significantly higher throughput for parallel inferencing
on Intel® Xeon® processors when serving LLMs to many
concurrent users.
+ Improved performance and memory consumption with prefix
caching, KV cache compression, and other optimizations
for serving LLMs using OpenVINO™ Model Server.
+ Support for Python 3.12.
- Support Change and Deprecation Notices
* Using deprecated features and components is not advised.
They are available to enable a smooth transition to new
solutions and will be discontinued in the future.
To keep using discontinued features, you will have to
revert to the last LTS OpenVINO version supporting them.
For more details, refer to the OpenVINO Legacy Features
and Components page.
* Discontinued in 2024.0:
+ Runtime components:
- Intel® Gaussian & Neural Accelerator (Intel® GNA).
Consider using the Neural Processing Unit (NPU) for
low-powered systems like Intel® Core™ Ultra or
14th generation and beyond.
- OpenVINO C++/C/Python 1.0 APIs (see 2023.3 API
transition guide for reference).
- All ONNX Frontend legacy API (known as
ONNX_IMPORTER_API)
- 'PerformanceMode.UNDEFINED' property as part of the
OpenVINO Python API
+ Tools:
- Deployment Manager. See installation and deployment
guides for current distribution options.
- Accuracy Checker.
- Post-Training Optimization Tool (POT). Neural Network
Compression Framework (NNCF) should be used instead.
- A Git patch for NNCF integration with
huggingface/transformers. The recommended approach is to
use huggingface/optimum-intel for applying NNCF
optimization on top of models from Hugging Face.
- Support for Apache MXNet, Caffe, and Kaldi model
formats. Conversion to ONNX may be used as a
solution.
* Deprecated and to be removed in the future:
+ The macOS x86_64 debug bins will no longer be
provided with the OpenVINO toolkit, starting with
OpenVINO 2024.5.
+ Python 3.8 is now considered deprecated, and it will not
be available beyond the 2024.4 OpenVINO version.
+ dKMB support is now considered deprecated and will be
fully removed with OpenVINO 2024.5
+ Intel® Streaming SIMD Extensions (Intel® SSE) will be
supported in source code form, but not enabled in the
binary package by default, starting with OpenVINO 2025.0
+ The openvino-nightly PyPI module will soon be discontinued.
End-users should proceed with the Simple PyPI nightly repo
instead. More information in Release Policy.
+ The OpenVINO™ Development Tools package (pip install
openvino-dev) will be removed from installation options and
distribution channels beginning with OpenVINO 2025.0.
+ Model Optimizer will be discontinued with OpenVINO 2025.0.
Consider using the new conversion methods instead. For more
details, see the model conversion transition guide.
+ OpenVINO property Affinity API will be discontinued with
OpenVINO 2025.0. It will be replaced with CPU binding
configurations (ov::hint::enable_cpu_pinning); see the
sketch at the end of this entry.
+ OpenVINO Model Server components:
- “auto shape” and “auto batch size” (reshaping a model in
runtime) will be removed in the future. OpenVINO's dynamic
shape models are recommended instead.
+ A number of notebooks have been deprecated. For an
up-to-date listing of available notebooks, refer to the
OpenVINO™ Notebook index (openvinotoolkit.github.io).
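
A minimal sketch of the CPU-binding replacement for the Affinity
API mentioned in this entry. It assumes the string form of the
ov::hint::enable_cpu_pinning property ("ENABLE_CPU_PINNING") as
exposed through the Python API; the model path is a placeholder:

  import openvino as ov

  core = ov.Core()
  model = core.read_model("model.xml")  # placeholder path

  # Replacement for the deprecated Affinity API: request CPU
  # pinning explicitly as a compile-time hint.
  compiled = core.compile_model(
      model, "CPU", {"ENABLE_CPU_PINNING": True})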
-------------------------------------------------------------------
Wed Oct 2 20:56:59 UTC 2024 - Giacomo Comes <gcomes.obs@gmail.com>
- Add Leap15 build
- Remove comment lines in the spec file that cause the insertion
of extra lines during a commit
-------------------------------------------------------------------
Sat Aug 10 01:41:06 UTC 2024 - Alessandro de Oliveira Faria <cabelo@opensuse.org>
- Remove NPU Compile Tool
* openvino-remove-npu-compile-tool.patch
- Update to 2024.3.0
- Summary of major features and improvements
* More Gen AI coverage and framework integrations to minimize
code changes
+ OpenVINO pre-optimized models are now available in Hugging
Face making it easier for developers to get started with
these models.
* Broader Large Language Model (LLM) support and more model
compression techniques.
+ Significant improvement in LLM performance on Intel
discrete GPUs with the addition of Multi-Head Attention
(MHA) and OneDNN enhancements.
* More portability and performance to run AI at the edge, in the
cloud, or locally.
+ Improved CPU performance when serving LLMs with the
inclusion of vLLM and continuous batching in the OpenVINO
Model Server (OVMS). vLLM is an easy-to-use open-source
library that supports efficient LLM inferencing and model
serving.
- Support Change and Deprecation Notices
* Using deprecated features and components is not advised.
They are available to enable a smooth transition to new
solutions and will be discontinued in the future. To keep
using discontinued features, you will have to revert to the
last LTS OpenVINO version supporting them. For more details,
refer to the OpenVINO Legacy Features and Components page.
* Discontinued in 2024.0:
+ Runtime components:
- Intel® Gaussian & Neural Accelerator (Intel® GNA). Consider
using the Neural Processing Unit (NPU) for low-powered
systems like Intel® Core™ Ultra or 14th generation
and beyond.
- OpenVINO C++/C/Python 1.0 APIs (see 2023.3 API transition
guide for reference).
- All ONNX Frontend legacy API (known as ONNX_IMPORTER_API)
- 'PerformanceMode.UNDEFINED' property as part of the OpenVINO
Python API
+ Tools:
- Deployment Manager. See installation and deployment guides
for current distribution options.
- Accuracy Checker.
- Post-Training Optimization Tool (POT). Neural Network
Compression Framework (NNCF) should be used instead.
- A Git patch for NNCF integration with
huggingface/transformers. The recommended approach is to
use huggingface/optimum-intel for applying NNCF optimization
on top of models from Hugging Face.
- Support for Apache MXNet, Caffe, and Kaldi model formats.
Conversion to ONNX may be used as a solution.
* Deprecated and to be removed in the future:
+ The OpenVINO™ Development Tools package (pip install
openvino-dev) will be removed from installation options
and distribution channels beginning with OpenVINO 2025.0.
+ Model Optimizer will be discontinued with OpenVINO 2025.0.
Consider using the new conversion methods instead. For
more details, see the model conversion transition guide.
+ OpenVINO property Affinity API will be discontinued with
OpenVINO 2025.0. It will be replaced with CPU binding
configurations (ov::hint::enable_cpu_pinning).
+ OpenVINO Model Server components:
- “auto shape” and “auto batch size” (reshaping a model
in runtime) will be removed in the future. OpenVINO's
dynamic shape models are recommended instead.
+ A number of notebooks have been deprecated. For an
up-to-date listing of available notebooks, refer to
the OpenVINO™ Notebook index (openvinotoolkit.github.io).
-------------------------------------------------------------------
Sat Jun 22 12:01:23 UTC 2024 - Andreas Schwab <schwab@suse.de>
- Add riscv-cpu-plugin subpackage
-------------------------------------------------------------------
Wed Jun 19 21:36:01 UTC 2024 - Alessandro de Oliveira Faria <cabelo@opensuse.org>
- Update to 2024.2.0
- More Gen AI coverage and framework integrations to minimize code
changes
* Llama 3 optimizations for CPUs, built-in GPUs, and discrete
GPUs for improved performance and efficient memory usage.
* Support for Phi-3-mini, a family of AI models that leverages
the power of small language models for faster, more accurate
and cost-effective text processing.
* Python Custom Operation is now enabled in OpenVINO making it
easier for Python developers to code their custom operations
instead of using C++ custom operations (also supported).
Python Custom Operation empowers users to implement their own
specialized operations into any model.
* Notebooks expansion to ensure better coverage for new models.
Noteworthy notebooks added: DynamiCrafter, YOLOv10, Chatbot
notebook with Phi-3, and QWEN2.
- Broader Large Language Model (LLM) support and more model
compression techniques.
* GPTQ method for 4-bit weight compression added to NNCF for
more efficient inference and improved performance of
compressed LLMs.
* Significant LLM performance improvements and reduced latency
for both built-in GPUs and discrete GPUs.
* Significant improvement in 2nd token latency and memory
footprint of FP16 weight LLMs on AVX2 (13th Gen Intel® Core™
processors) and AVX512 (3rd Gen Intel® Xeon® Scalable
Processors) based CPU platforms, particularly for small
batch sizes.
- More portability and performance to run AI at the edge, in the
cloud, or locally.
* Model Serving Enhancements:
* Preview: OpenVINO Model Server (OVMS) now supports
OpenAI-compatible API along with Continuous Batching and
PagedAttention, enabling significantly higher throughput
for parallel inferencing, especially on Intel® Xeon®
processors, when serving LLMs to many concurrent users.
* OpenVINO backend for Triton Server now supports built-in
GPUs and discrete GPUs, in addition to dynamic
shapes support.
* Integration of TorchServe through torch.compile OpenVINO
backend for easy model deployment, provisioning to
multiple instances, model versioning, and maintenance
(see the sketch after this list).
* Preview: addition of the Generate API, a simplified API
for text generation using large language models with only
a few lines of code. The API is available through the newly
launched OpenVINO GenAI package (also sketched after this
list).
* Support for Intel Atom® Processor X Series. For more details,
see System Requirements.
* Preview: Support for Intel® Xeon® 6 processor.
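
Two hedged sketches for the serving items above; the model
directory, prompt, and toy module are placeholders, and the
snippets assume the optional openvino_genai package and a
PyTorch installation:

  # Generate API from the OpenVINO GenAI package: one-call text
  # generation from a directory holding a converted LLM.
  import openvino_genai

  pipe = openvino_genai.LLMPipeline("./llm_model_dir", "CPU")
  print(pipe.generate("What is OpenVINO?", max_new_tokens=64))

  # torch.compile with the OpenVINO backend; importing
  # openvino.torch registers the "openvino" backend.
  import torch
  import openvino.torch  # noqa: F401

  toy = torch.nn.Linear(8, 2)  # placeholder model
  compiled = torch.compile(toy, backend="openvino")
  out = compiled(torch.randn(1, 8))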
- Support Change and Deprecation Notices
* Using deprecated features and components is not advised.
They are available to enable a smooth transition to new
solutions and will be discontinued in the future.
To keep using discontinued features, you will have to revert
to the last LTS OpenVINO version supporting them. For more
details, refer to the OpenVINO Legacy Features and
Components page.
* Discontinued in 2024.0:
+ Runtime components:
- Intel® Gaussian & Neural Accelerator (Intel® GNA).
Consider using the Neural Processing Unit (NPU) for
low-powered systems like Intel® Core™ Ultra or 14th
generation and beyond.
- OpenVINO C++/C/Python 1.0 APIs (see 2023.3 API
transition guide for reference).
- All ONNX Frontend legacy API (known as ONNX_IMPORTER_API)
- 'PerformanceMode.UNDEFINED' property as part of the
OpenVINO Python API
+ Tools:
- Deployment Manager. See installation and deployment
guides for current distribution options.
- Accuracy Checker.
- Post-Training Optimization Tool (POT). Neural Network
Compression Framework (NNCF) should be used instead.
- A Git patch for NNCF integration with
huggingface/transformers. The recommended approach
is to use huggingface/optimum-intel for applying NNCF
optimization on top of models from Hugging Face.
- Support for Apache MXNet, Caffe, and Kaldi model formats.
Conversion to ONNX may be used as a solution.
* Deprecated and to be removed in the future:
+ The OpenVINO™ Development Tools package (pip install
openvino-dev) will be removed from installation options
and distribution channels beginning with OpenVINO 2025.0.
+ Model Optimizer will be discontinued with OpenVINO 2025.0.
Consider using the new conversion methods instead. For
more details, see the model conversion transition guide.
+ OpenVINO property Affinity API will be discontinued with
OpenVINO 2025.0. It will be replaced with CPU binding
configurations (ov::hint::enable_cpu_pinning).
+ OpenVINO Model Server components:
+ “auto shape” and “auto batch size” (reshaping a model in
runtime) will be removed in the future. OpenVINO's dynamic
shape models are recommended instead.
+ A number of notebooks have been deprecated. For an
up-to-date listing of available notebooks, refer to the
OpenVINO™ Notebook index (openvinotoolkit.github.io).
-------------------------------------------------------------------
Thu May 9 22:56:53 UTC 2024 - Alessandro de Oliveira Faria <cabelo@opensuse.org>
- Fix sample source path in build script:
* openvino-fix-build-sample-path.patch
- Update to 2024.1.0
- More Generative AI coverage and framework integrations to
minimize code changes.
* Mixtral and URLNet models optimized for performance
improvements on Intel® Xeon® processors.
* Stable Diffusion 1.5, ChatGLM3-6B, and Qwen-7B models
optimized for improved inference speed on Intel® Core™
Ultra processors with integrated GPU.
* Support for Falcon-7B-Instruct, a GenAI Large Language Model
(LLM) ready-to-use chat/instruct model with superior
performance metrics.
* New Jupyter Notebooks added: YOLO V9, YOLO V8
Oriented Bounding Boxes Detection (OBB), Stable Diffusion
in Keras, MobileCLIP, RMBG-v1.4 Background Removal, Magika,
TripoSR, AnimateAnyone, LLaVA-Next, and RAG system with
OpenVINO and LangChain.
- Broader Large Language Model (LLM) support and more model
compression techniques.
* LLM compilation time reduced through additional optimizations
with compressed embedding. Improved 1st token performance of
LLMs on 4th and 5th generations of Intel® Xeon® processors
with Intel® Advanced Matrix Extensions (Intel® AMX).
* Better LLM compression and improved performance with oneDNN,
INT4, and INT8 support for Intel® Arc™ GPUs.
* Significant memory reduction for select smaller GenAI
models on Intel® Core™ Ultra processors with integrated GPU.
- More portability and performance to run AI at the edge,
in the cloud, or locally.
* The preview NPU plugin for Intel® Core™ Ultra processors
is now available in the OpenVINO open-source GitHub
repository, in addition to the main OpenVINO package on PyPI.
* The JavaScript API is now more easily accessible through
the npm repository, enabling JavaScript developers seamless
access to the OpenVINO API.
* FP16 inference on ARM processors now enabled for the
Convolutional Neural Network (CNN) by default.
- Support Change and Deprecation Notices
* Using deprecated features and components is not advised. They
are available to enable a smooth transition to new solutions
and will be discontinued in the future. To keep using
discontinued features, you will have to revert to the last
LTS OpenVINO version supporting them.
* For more details, refer to the OpenVINO Legacy Features
and Components page.
* Discontinued in 2024.0:
+ Runtime components:
- Intel® Gaussian & Neural Accelerator (Intel® GNA).
Consider using the Neural Processing Unit (NPU)
for low-powered systems like Intel® Core™ Ultra or
14th generation and beyond.
- OpenVINO C++/C/Python 1.0 APIs (see 2023.3 API
transition guide for reference).
- All ONNX Frontend legacy API (known as
ONNX_IMPORTER_API)
- 'PerformanceMode.UNDEFINED' property as part of
the OpenVINO Python API
+ Tools:
- Deployment Manager. See installation and deployment
guides for current distribution options.
- Accuracy Checker.
- Post-Training Optimization Tool (POT). Neural Network
Compression Framework (NNCF) should be used instead.
- A Git patch for NNCF integration with
huggingface/transformers. The recommended approach
is to use huggingface/optimum-intel for applying
NNCF optimization on top of models from Hugging
Face.
- Support for Apache MXNet, Caffe, and Kaldi model
formats. Conversion to ONNX may be used as
a solution.
* Deprecated and to be removed in the future:
+ The OpenVINO™ Development Tools package (pip install
openvino-dev) will be removed from installation options
and distribution channels beginning with OpenVINO 2025.0.
+ Model Optimizer will be discontinued with OpenVINO 2025.0.
Consider using the new conversion methods instead. For
more details, see the model conversion transition guide.
+ OpenVINO property Affinity API will be discontinued with
OpenVINO 2025.0. It will be replaced with CPU binding
configurations (ov::hint::enable_cpu_pinning).
+ OpenVINO Model Server components:
- “auto shape” and “auto batch size” (reshaping a model
in runtime) will be removed in the future. OpenVINO's
dynamic shape models are recommended instead.
-------------------------------------------------------------------
Tue Apr 23 18:57:17 UTC 2024 - Atri Bhattacharya <badshah400@gmail.com>
- License update: play safe and list all third party licenses as
part of the License tag.
-------------------------------------------------------------------
Tue Apr 23 12:42:32 UTC 2024 - Atri Bhattacharya <badshah400@gmail.com>
- Switch to _service file as tagged Source tarball does not
include `./thirdparty` submodules.
- Update openvino-fix-install-paths.patch to fix python module
install path.
- Enable python module and split it out into a python subpackage
(for now default python3 only).
- Explicitly build python metadata (dist-info) and install it
(needs simple sed hackery to support "officially" unsupported
platform ppc64le).
- Specify ENABLE_JS=OFF to turn off javascript bindings as
building these requires downloading npm stuff from the network.
- Build with system pybind11.
- Bump _constraints for updated disk space requirements.
- Drop empty %check section, rpmlint was misleading when it
recommended adding this.
-------------------------------------------------------------------
Fri Apr 19 08:08:02 UTC 2024 - Atri Bhattacharya <badshah400@gmail.com>
- Numerous specfile cleanups:
* Drop redundant `mv` commands and use `install` where
appropriate.
* Build with system protobuf.
* Fix Summary tags.
* Trim package descriptions.
* Drop forcing CMAKE_BUILD_TYPE=Release, let macro default
RelWithDebInfo be used instead.
* Correct naming of shared library packages.
* Separate out libopenvino_c.so.* into own shared lib package.
* Drop rpmlintrc rule used to hide shlib naming mistakes.
* Rename Source tarball to %{name}-%{version}.EXT pattern.
* Use ldconfig_scriptlet macro for post(un).
- Add openvino-onnx-ml-defines.patch -- Define ONNX_ML at compile
time when using system onnx to allow using 'onnx-ml.pb.h'
instead of 'onnx.pb.h', the latter not being shipped with
openSUSE's onnx-devel package (gh#onnx/onnx#3074).
- Add openvino-fix-install-paths.patch: Change hard-coded install
paths in upstream cmake macro to standard Linux dirs.
- Add openvino-ComputeLibrary-include-string.patch: Include header
for std::string.
- Add external devel packages as Requires for openvino-devel.
- Pass -Wl,-z,noexecstack to %build_ldflags to avoid an exec stack
issue with intel CPU plugin.
- Use ninja for build.
- Adapt _constraits file for correct disk space and memory
requirements.
- Add empty %check section.
-------------------------------------------------------------------
Mon Apr 15 03:18:33 UTC 2024 - Alessandro de Oliveira Faria <cabelo@opensuse.org>
- Initial package
- Version 2024.0.0
- Add openvino-rpmlintrc.

openvino.obsinfo (new file)

@@ -0,0 +1,4 @@
name: openvino
version: 2024.6.0
mtime: 1734008710
commit: 4c0f47d233538e67a8eb4414c85617e85be55fc4

openvino.spec (new file)

@@ -0,0 +1,436 @@
#
# spec file for package openvino
#
# Copyright (c) 2024 SUSE LLC
# Copyright (c) 2024 Alessandro de Oliveira Faria (A.K.A. CABELO) <cabelo@opensuse.org> or <alessandro.faria@owasp.org>
#
# All modifications and additions to the file contributed by third parties
# remain the property of their copyright owners, unless otherwise agreed
# upon. The license for this file, and modifications and additions to the
# file, is the same license as for the pristine package itself (unless the
# license for the pristine package is not an Open Source License, in which
# case the license is the MIT License). An "Open Source License" is a
# license that conforms to the Open Source Definition (Version 1.9)
# published by the Open Source Initiative.
# Please submit bugfixes or comments via https://bugs.opensuse.org/
#
%if 0%{?suse_version} < 1600
%define isLeap15 %nil
%else
%undefine isLeap15
%endif
# Compilation takes ~1 hr on OBS for a single python; don't try all supported flavours
%if %{defined isLeap15}
%define x86_64 x86_64
%define pythons python311
%else
%define pythons python3
%endif
%define __builder ninja
%define so_ver 2460
%define shlib lib%{name}%{so_ver}
%define shlib_c lib%{name}_c%{so_ver}
%define prj_name OpenVINO
Name: openvino
Version: 2024.6.0
Release: 0
Summary: A toolkit for optimizing and deploying AI inference
# Let's be safe and put all third party licenses here, no matter whether we use the specific thirdparty libs or not
License: Apache-2.0 AND BSD-2-Clause AND BSD-3-Clause AND HPND AND JSON AND MIT AND OFL-1.1 AND Zlib
URL: https://github.com/openvinotoolkit/openvino
Source0: %{name}-%{version}.tar.zst
Source1: %{name}-rpmlintrc
# PATCH-FEATURE-OPENSUSE openvino-fix-install-paths.patch badshah400@gmail.com -- Fix installation paths hardcoded into upstream defined cmake macros
Patch0: openvino-fix-install-paths.patch
# PATCH-FIX-UPSTREAM openvino-ComputeLibrary-include-string.patch badshah400@gmail.com -- Include header for std::string
Patch1: openvino-ComputeLibrary-include-string.patch
# PATCH-FIX-UPSTREAM openvino-fix-build-sample-path.patch cabelo@opensuse.org -- Fix sample source path in build script
Patch2: openvino-fix-build-sample-path.patch
BuildRequires: ade-devel
BuildRequires: cmake
BuildRequires: fdupes
BuildRequires: gcc13-c++
BuildRequires: ninja
BuildRequires: opencl-cpp-headers
# FIXME: /usr/include/onnx/onnx-ml.pb.h:17:2: error: This file was generated by
# an older version of protoc which is incompatible with your Protocol Buffer
# headers. Please regenerate this file with a newer version of protoc.
#BuildRequires: cmake(ONNX)
BuildRequires: pkgconfig
BuildRequires: %{python_module devel}
BuildRequires: %{python_module pip}
BuildRequires: %{python_module pybind11-devel}
BuildRequires: %{python_module setuptools}
BuildRequires: %{python_module wheel}
BuildRequires: python-rpm-macros
BuildRequires: zstd
BuildRequires: pkgconfig(flatbuffers)
BuildRequires: pkgconfig(libva)
BuildRequires: pkgconfig(nlohmann_json)
BuildRequires: pkgconfig(ocl-icd)
BuildRequires: pkgconfig(protobuf) < 22
BuildRequires: pkgconfig(pugixml)
%if %{defined isLeap15}
BuildRequires: opencl-headers
BuildRequires: snappy-devel
BuildRequires: tbb-devel
%else
BuildRequires: pkgconfig(OpenCL-Headers)
BuildRequires: pkgconfig(snappy)
BuildRequires: pkgconfig(tbb)
%endif
BuildRequires: pkgconfig(zlib)
%ifarch %{arm64}
BuildRequires: scons
%endif
# No 32-bit support
ExcludeArch: %{ix86} %{arm32} ppc
%define python_subpackage_only 1
%python_subpackages
%description
OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
%package -n %{shlib}
Summary: Shared library for OpenVINO toolkit
%description -n %{shlib}
OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
This package provides the shared library for OpenVINO.
%package -n %{shlib_c}
Summary: Shared C library for OpenVINO toolkit
%description -n %{shlib_c}
This package provides the C library for OpenVINO.
%package -n %{name}-devel
Summary: Headers and sources for OpenVINO toolkit
Requires: %{shlib_c} = %{version}
Requires: %{shlib} = %{version}
Requires: lib%{name}_ir_frontend%{so_ver} = %{version}
Requires: lib%{name}_onnx_frontend%{so_ver} = %{version}
Requires: lib%{name}_paddle_frontend%{so_ver} = %{version}
Requires: lib%{name}_pytorch_frontend%{so_ver} = %{version}
Requires: lib%{name}_tensorflow_frontend%{so_ver} = %{version}
Requires: lib%{name}_tensorflow_lite_frontend%{so_ver} = %{version}
Requires: pkgconfig(flatbuffers)
Requires: pkgconfig(libva)
Requires: pkgconfig(nlohmann_json)
Requires: pkgconfig(ocl-icd)
Requires: pkgconfig(protobuf)
Requires: pkgconfig(pugixml)
%if %{defined isLeap15}
Requires: opencl-headers
Requires: snappy-devel
Requires: tbb-devel
%else
Requires: pkgconfig(OpenCL-Headers)
Requires: pkgconfig(snappy)
Requires: pkgconfig(tbb)
%endif
Recommends: %{name}-auto-batch-plugin = %{version}
Recommends: %{name}-auto-plugin = %{version}
Recommends: %{name}-hetero-plugin = %{version}
Recommends: %{name}-intel-cpu-plugin = %{version}
%ifarch riscv64
Recommends: %{name}-riscv-cpu-plugin = %{version}
%endif
%description -n %{name}-devel
OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
This package provides the headers and sources for developing applications with
OpenVINO.
%package -n %{name}-arm-cpu-plugin
Summary: Intel CPU plugin for OpenVINO toolkit
%description -n %{name}-arm-cpu-plugin
OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
This package provides the ARM CPU plugin for OpenVINO on %{arm64} archs.
%package -n %{name}-riscv-cpu-plugin
Summary: RISC-V CPU plugin for OpenVINO toolkit
%description -n %{name}-riscv-cpu-plugin
OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
This package provides the RISC-V CPU plugin for OpenVINO on riscv64 archs.
%package -n %{name}-auto-plugin
Summary: Auto / Multi software plugin for OpenVINO toolkit
%description -n %{name}-auto-plugin
OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
This package provides the Auto / Multi software plugin for OpenVINO.
%package -n %{name}-auto-batch-plugin
Summary: Automatic batch software plugin for OpenVINO toolkit
%description -n %{name}-auto-batch-plugin
OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
This package provides the automatic batch software plugin for OpenVINO.
%package -n %{name}-hetero-plugin
Summary: Hetero frontend for Intel OpenVINO toolkit
%description -n %{name}-hetero-plugin
OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
This package provides the hetero frontend for OpenVINO.
%package -n %{name}-intel-cpu-plugin
Summary: Intel CPU plugin for OpenVINO toolkit
%description -n %{name}-intel-cpu-plugin
OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
This package provides the Intel CPU plugin for OpenVINO for %{x86_64} archs.
%package -n %{name}-intel-npu-plugin
Summary: Intel NPU plugin for OpenVINO toolkit
%description -n %{name}-intel-npu-plugin
OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
This package provides the Intel NPU plugin for OpenVINO for %{x86_64} archs.
%package -n lib%{name}_ir_frontend%{so_ver}
Summary: IR frontend for OpenVINO toolkit
%description -n lib%{name}_ir_frontend%{so_ver}
OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
This package provides the ir frontend for OpenVINO.
%package -n lib%{name}_onnx_frontend%{so_ver}
Summary: Onnx frontend for OpenVINO toolkit
%description -n lib%{name}_onnx_frontend%{so_ver}
OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
This package provides the onnx frontend for OpenVINO.
%package -n lib%{name}_paddle_frontend%{so_ver}
Summary: Paddle frontend for Intel OpenVINO toolkit
%description -n lib%{name}_paddle_frontend%{so_ver}
OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
This package provides the paddle frontend for OpenVINO.
%package -n lib%{name}_pytorch_frontend%{so_ver}
Summary: PyTorch frontend for OpenVINO toolkit
%description -n lib%{name}_pytorch_frontend%{so_ver}
OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
This package provides the pytorch frontend for OpenVINO.
%package -n lib%{name}_tensorflow_frontend%{so_ver}
Summary: TensorFlow frontend for OpenVINO toolkit
%description -n lib%{name}_tensorflow_frontend%{so_ver}
OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
This package provides the tensorflow frontend for OpenVINO.
%package -n lib%{name}_tensorflow_lite_frontend%{so_ver}
Summary: TensorFlow Lite frontend for OpenVINO toolkit
%description -n lib%{name}_tensorflow_lite_frontend%{so_ver}
OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
This package provides the tensorflow-lite frontend for OpenVINO.
%package -n python-openvino
Summary: Python module for OpenVINO toolkit
Requires: python-numpy < 2
Requires: python-openvino-telemetry
%description -n python-openvino
OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
This package provides a Python module for interfacing with the OpenVINO toolkit.
%package -n %{name}-sample
Summary: Samples for use with OpenVINO toolkit
BuildArch: noarch
%description -n %{name}-sample
OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
This package provides some samples for use with OpenVINO.
%prep
%autosetup -p1
%build
export CC=gcc-13 CXX=g++-13
# Otherwise intel_cpu plugin declares an executable stack
%ifarch %{x86_64}
%define build_ldflags -Wl,-z,noexecstack
%endif
%cmake \
-DCMAKE_CXX_STANDARD=17 \
-DBUILD_SHARED_LIBS=ON \
-DENABLE_OV_ONNX_FRONTEND=ON \
-DENABLE_OV_PADDLE_FRONTEND=ON \
-DENABLE_OV_PYTORCH_FRONTEND=ON \
-DENABLE_OV_IR_FRONTEND=ON \
-DENABLE_OV_TF_FRONTEND=ON \
-DENABLE_OV_TF_LITE_FRONTEND=ON \
-DENABLE_INTEL_GPU=OFF \
-DENABLE_JS=OFF \
-DENABLE_PYTHON=ON \
-DENABLE_WHEEL=OFF \
-DENABLE_SYSTEM_OPENCL=ON \
-DENABLE_SYSTEM_PROTOBUF=ON \
-DENABLE_SYSTEM_PUGIXML=ON \
-DENABLE_SYSTEM_SNAPPY=ON \
-DENABLE_SYSTEM_TBB=ON \
%if %{defined isLeap15}
-DENABLE_TBBBIND_2_5=OFF \
%endif
-DONNX_USE_PROTOBUF_SHARED_LIBS=ON \
-DProtobuf_USE_STATIC_LIBS=OFF \
%{nil}
%cmake_build
# Manually generate dist-info dir
export WHEEL_VERSION=%{version} \
BUILD_TYPE=RelWithDebInfo
%ifarch %{power64}
# RelWithDebInfo
# Manual hackery for power64 because it is not "officially" supported
sed -i "s/{ARCH}/%{_arch}/" ../src/bindings/python/wheel/setup.py
%endif
%python_exec ../src/bindings/python/wheel/setup.py dist_info -o ../
%install
%cmake_install
# Hash-bangs in non-exec python sample scripts
sed -Ei "1{\@/usr/bin/env@d}" \
%{buildroot}%{_datadir}/%{prj_name}/samples/python/benchmark/bert_benchmark/bert_benchmark.py \
%{buildroot}%{_datadir}/%{prj_name}/samples/python/benchmark/sync_benchmark/sync_benchmark.py \
%{buildroot}%{_datadir}/%{prj_name}/samples/python/benchmark/throughput_benchmark/throughput_benchmark.py \
%{buildroot}%{_datadir}/%{prj_name}/samples/python/classification_sample_async/classification_sample_async.py \
%{buildroot}%{_datadir}/%{prj_name}/samples/python/hello_classification/hello_classification.py \
%{buildroot}%{_datadir}/%{prj_name}/samples/python/hello_query_device/hello_query_device.py \
%{buildroot}%{_datadir}/%{prj_name}/samples/python/hello_reshape_ssd/hello_reshape_ssd.py \
%{buildroot}%{_datadir}/%{prj_name}/samples/python/model_creation_sample/model_creation_sample.py
# Unnecessary if we get our package dependencies and lib paths right!
rm -fr %{buildroot}%{_prefix}/install_dependencies \
%{buildroot}%{_prefix}/setupvars.sh
%{python_expand rm %{buildroot}%{$python_sitearch}/requirements.txt
chmod -x %{buildroot}%{$python_sitearch}/%{name}/tools/ovc/ovc.py
cp -r %{name}-%{version}.dist-info %{buildroot}%{$python_sitearch}/
%fdupes %{buildroot}%{$python_sitearch}/%{name}/
}
%fdupes %{buildroot}%{_datadir}/
# We do not use bundled thirdparty libs
rm -fr %{buildroot}%{_datadir}/licenses/*
%ldconfig_scriptlets -n %{shlib}
%ldconfig_scriptlets -n %{shlib_c}
%ldconfig_scriptlets -n lib%{name}_ir_frontend%{so_ver}
%ldconfig_scriptlets -n lib%{name}_onnx_frontend%{so_ver}
%ldconfig_scriptlets -n lib%{name}_paddle_frontend%{so_ver}
%ldconfig_scriptlets -n lib%{name}_pytorch_frontend%{so_ver}
%ldconfig_scriptlets -n lib%{name}_tensorflow_lite_frontend%{so_ver}
%ldconfig_scriptlets -n lib%{name}_tensorflow_frontend%{so_ver}
%files -n %{shlib}
%license LICENSE
%{_libdir}/libopenvino.so.*
%files -n %{shlib_c}
%license LICENSE
%{_libdir}/libopenvino_c.so.*
%files -n %{name}-auto-batch-plugin
%dir %{_libdir}/%{prj_name}
%{_libdir}/%{prj_name}/libopenvino_auto_batch_plugin.so
%files -n %{name}-auto-plugin
%dir %{_libdir}/%{prj_name}
%{_libdir}/%{prj_name}/libopenvino_auto_plugin.so
%ifarch %{x86_64}
%files -n %{name}-intel-cpu-plugin
%dir %{_libdir}/%{prj_name}
%{_libdir}/%{prj_name}/libopenvino_intel_cpu_plugin.so
%files -n %{name}-intel-npu-plugin
%dir %{_libdir}/%{prj_name}
%{_libdir}/%{prj_name}/libopenvino_intel_npu_plugin.so
%endif
%ifarch %{arm64}
%files -n %{name}-arm-cpu-plugin
%dir %{_libdir}/%{prj_name}
%{_libdir}/%{prj_name}/libopenvino_arm_cpu_plugin.so
%endif
%ifarch riscv64
%files -n %{name}-riscv-cpu-plugin
%dir %{_libdir}/%{prj_name}
%{_libdir}/%{prj_name}/libopenvino_riscv_cpu_plugin.so
%endif
%files -n %{name}-hetero-plugin
%dir %{_libdir}/%{prj_name}
%{_libdir}/%{prj_name}/libopenvino_hetero_plugin.so
%files -n lib%{name}_onnx_frontend%{so_ver}
%{_libdir}/libopenvino_onnx_frontend.so.*
%files -n lib%{name}_ir_frontend%{so_ver}
%{_libdir}/libopenvino_ir_frontend.so.*
%files -n lib%{name}_paddle_frontend%{so_ver}
%{_libdir}/libopenvino_paddle_frontend.so.*
%files -n lib%{name}_pytorch_frontend%{so_ver}
%{_libdir}/libopenvino_pytorch_frontend.so.*
%files -n lib%{name}_tensorflow_frontend%{so_ver}
%{_libdir}/libopenvino_tensorflow_frontend.so.*
%files -n lib%{name}_tensorflow_lite_frontend%{so_ver}
%{_libdir}/libopenvino_tensorflow_lite_frontend.so.*
%files -n %{name}-sample
%license LICENSE
%{_datadir}/%{prj_name}/
%exclude %{_prefix}/lib/debug/usr/share/OpenVINO/tools/compile_tool/
%files -n %{name}-devel
%license LICENSE
%{_includedir}/%{name}/
%{_libdir}/cmake/%{prj_name}/
%{_libdir}/*.so
%{_libdir}/pkgconfig/openvino.pc
%files %{python_files openvino}
%license LICENSE
%{python_sitearch}/openvino/
%{python_sitearch}/openvino*-info/
%changelog