From 3ac7c6a6fed93881077e30fea5d2785ce78fe93b0f2748df0dcee6b4eff2cc91 Mon Sep 17 00:00:00 2001
From: Christian Goll <cgoll@suse.com>
Date: Tue, 20 Aug 2024 13:01:12 +0000
Subject: [PATCH] Update to 2024.3.0
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Update to 2024.3.0
- Summary of major features and improvements
  * More Gen AI coverage and framework integrations to minimize
    code changes
    + OpenVINO pre-optimized models are now available in Hugging
      Face, making it easier for developers to get started with
      these models.
  * Broader Large Language Model (LLM) support and more model
    compression techniques.
    + Significant improvement in LLM performance on Intel
      discrete GPUs with the addition of Multi-Head Attention
      (MHA) and OneDNN enhancements.
  * More portability and performance to run AI at the edge, in the
    cloud, or locally.
    + Improved CPU performance when serving LLMs with the
      inclusion of vLLM and continuous batching in the OpenVINO
      Model Server (OVMS). vLLM is an easy-to-use open-source
      library that supports efficient LLM inferencing and model
      serving.
    + Ubuntu 24.04 long-term support (LTS), 64-bit (Kernel 6.8+)
      (preview support)
- Support Change and Deprecation Notices
  * Using deprecated features and components is not advised.
    They are available to enable a smooth transition to new
    solutions and will be discontinued in the future. To keep
    using discontinued features, you will have to revert to the
    last LTS OpenVINO version supporting them. For more details,
    refer to the OpenVINO Legacy Features and Components page.
  * Discontinued in 2024.0:
    + Runtime components:
      - Intel® Gaussian & Neural Accelerator (Intel® GNA).
        Consider using the Neural Processing Unit (NPU) for
        low-powered systems like Intel® Core™ Ultra or 14th
        generation and beyond.
      - OpenVINO C++/C/Python 1.0 APIs (see 2023.3 API transition
        guide for reference).
      - All ONNX Frontend legacy API (known as ONNX_IMPORTER_API)
      - 'PerformanceMode.UNDEFINED' property as part of the
        OpenVINO Python API
    + Tools:
      - Deployment Manager. See installation and deployment guides
        for current distribution options.
      - Accuracy Checker.
      - Post-Training Optimization Tool (POT). Neural Network
        Compression Framework (NNCF) should be used instead.
      - A Git patch for NNCF integration with huggingface/
        transformers. The recommended approach is to use
        huggingface/optimum-intel for applying NNCF optimization
        on top of models from Hugging Face.
      - Support for Apache MXNet, Caffe, and Kaldi model formats.
        Conversion to ONNX may be used as a solution.
  * Deprecated and to be removed in the future:
    + The OpenVINO™ Development Tools package (pip install
      openvino-dev) will be removed from installation options
      and distribution channels beginning with OpenVINO 2025.0.
    + Model Optimizer will be discontinued with OpenVINO 2025.0.
      Consider using the new conversion methods instead. For
      more details, see the model conversion transition guide.
    + OpenVINO property Affinity API will be discontinued with
      OpenVINO 2025.0. It will be replaced with CPU binding
      configurations (ov::hint::enable_cpu_pinning).
    + OpenVINO Model Server components:
      - “auto shape” and “auto batch size” (reshaping a model
        in runtime) will be removed in the future. OpenVINO’s
        dynamic shape models are recommended instead.
    + A number of notebooks have been deprecated. For an
      up-to-date listing of available notebooks, refer to
      the OpenVINO™ Notebook index (openvinotoolkit.github.io).

OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/openvino?expand=0&rev=13
---
 .gitattributes                               |  23 +
 .gitignore                                   |   1 +
 _constraints                                 |  11 +
 _service                                     |  16 +
 openvino-2024.2.0.obscpio                    |   3 +
 openvino-2024.3.0.obscpio                    |   3 +
 openvino-ComputeLibrary-include-string.patch |  12 +
 openvino-fix-build-sample-path.patch         |  12 +
 openvino-fix-install-paths.patch             |  61 +++
 openvino-onnx-ml-defines.patch               |  12 +
 openvino-remove-npu-compile-tool.patch       |  28 ++
 openvino-rpmlintrc                           |   4 +
 openvino.changes                             | 331 +++++++++++++
 openvino.obsinfo                             |   4 +
 openvino.spec                                | 475 +++++++++++++++++++
 15 files changed, 996 insertions(+)
 create mode 100644 .gitattributes
 create mode 100644 .gitignore
 create mode 100644 _constraints
 create mode 100644 _service
 create mode 100644 openvino-2024.2.0.obscpio
 create mode 100644 openvino-2024.3.0.obscpio
 create mode 100644 openvino-ComputeLibrary-include-string.patch
 create mode 100644 openvino-fix-build-sample-path.patch
 create mode 100644 openvino-fix-install-paths.patch
 create mode 100644 openvino-onnx-ml-defines.patch
 create mode 100644 openvino-remove-npu-compile-tool.patch
 create mode 100644 openvino-rpmlintrc
 create mode 100644 openvino.changes
 create mode 100644 openvino.obsinfo
 create mode 100644 openvino.spec

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..9b03811
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,23 @@
+## Default LFS
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.bsp filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.gem filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.jar filter=lfs diff=lfs merge=lfs -text
+*.lz filter=lfs diff=lfs merge=lfs -text
+*.lzma filter=lfs diff=lfs merge=lfs -text
+*.obscpio filter=lfs diff=lfs merge=lfs -text
+*.oxt filter=lfs diff=lfs merge=lfs -text
+*.pdf filter=lfs diff=lfs merge=lfs -text
+*.png filter=lfs diff=lfs merge=lfs -text
+*.rpm filter=lfs diff=lfs merge=lfs -text
+*.tbz filter=lfs diff=lfs merge=lfs -text
+*.tbz2 filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.ttf filter=lfs diff=lfs merge=lfs -text
+*.txz filter=lfs diff=lfs merge=lfs -text
+*.whl filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..57affb6
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+.osc
diff --git a/_constraints b/_constraints
new file mode 100644
index 0000000..69ac422
--- /dev/null
+++ b/_constraints
@@ -0,0 +1,11 @@
+<constraints>
+  <hardware>
+    <disk>
+      <size unit="G">20</size>
+    </disk>
+    <memory>
+      <size unit="G">8</size>
+    </memory>
+  </hardware>
+</constraints>
diff --git a/_service b/_service
new file mode 100644
index 0000000..6f64368
--- /dev/null
+++ b/_service
@@ -0,0 +1,16 @@
+<services>
+  <service name="obs_scm">
+    <param name="url">https://github.com/openvinotoolkit/openvino.git</param>
+    <param name="scm">git</param>
+    <param name="revision">2024.3.0</param>
+    <param name="version">2024.3.0</param>
+    <param name="submodules">enable</param>
+    <param name="filename">openvino</param>
+    <param name="exclude">.git</param>
+  </service>
+  <service name="tar" mode="buildtime"/>
+  <service name="recompress" mode="buildtime">
+    <param name="file">*.tar</param>
+    <param name="compression">zstd</param>
+  </service>
+</services>
diff --git a/openvino-2024.2.0.obscpio b/openvino-2024.2.0.obscpio
new file mode 100644
index 0000000..ef6fbe9
--- /dev/null
+++ b/openvino-2024.2.0.obscpio
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:50b3efee39ea06430456d49db9b4173b22999d8b3e7547dc658bb37df82b0f1b
+size 1036420623
diff --git a/openvino-2024.3.0.obscpio b/openvino-2024.3.0.obscpio
new file mode 100644
index 0000000..64bfc11
--- /dev/null
+++ b/openvino-2024.3.0.obscpio
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bacc2b9540afda6c5bd6d17ddea35afe17caefdd4fa1a350ed1c8be2eb290981
+size 1055294991
diff --git a/openvino-ComputeLibrary-include-string.patch b/openvino-ComputeLibrary-include-string.patch
new file mode 100644
index 0000000..78a2fdf
--- /dev/null
+++ b/openvino-ComputeLibrary-include-string.patch
@@ -0,0 +1,12 @@
+Index: openvino-2024.0.0/src/plugins/intel_cpu/thirdparty/ComputeLibrary/arm_compute/core/utils/logging/IPrinter.h
+===================================================================
+--- openvino-2024.0.0.orig/src/plugins/intel_cpu/thirdparty/ComputeLibrary/arm_compute/core/utils/logging/IPrinter.h
++++ openvino-2024.0.0/src/plugins/intel_cpu/thirdparty/ComputeLibrary/arm_compute/core/utils/logging/IPrinter.h
+@@ -25,6 +25,7 @@
+ #define ARM_COMPUTE_LOGGING_PRINTER_H
+ 
+ #include "support/Mutex.h"
++#include <string>
+ 
+ namespace arm_compute
+ {
diff --git a/openvino-fix-build-sample-path.patch b/openvino-fix-build-sample-path.patch
new file mode 100644
index 0000000..f0742b1
--- /dev/null
+++ b/openvino-fix-build-sample-path.patch
@@ -0,0 +1,12 @@
+diff -uNr openvino.orig/samples/cpp/build_samples.sh openvino/samples/cpp/build_samples.sh
+--- openvino.orig/samples/cpp/build_samples.sh	2024-04-25 01:04:42.451868881 -0300
++++ openvino/samples/cpp/build_samples.sh	2024-04-25 01:05:04.678342617 -0300
+@@ -59,7 +59,7 @@
+     printf "\nSetting environment variables for building samples...\n"
+ 
+     if [ -z "$INTEL_OPENVINO_DIR" ]; then
+-        if [[ "$SAMPLES_SOURCE_DIR" = "/usr/share/openvino"* ]]; then
++        if [[ "$SAMPLES_SOURCE_DIR" = "/usr/share/OpenVINO"* ]]; then
+             true
+         elif [ -e "$SAMPLES_SOURCE_DIR/../../setupvars.sh" ]; then
+             setupvars_path="$SAMPLES_SOURCE_DIR/../../setupvars.sh"
diff --git a/openvino-fix-install-paths.patch b/openvino-fix-install-paths.patch
new file mode 100644
index 0000000..df80d23
--- /dev/null
+++ b/openvino-fix-install-paths.patch
@@ -0,0 +1,61 @@
+Index: openvino-2024.0.0/cmake/developer_package/packaging/archive.cmake
+===================================================================
+--- openvino-2024.0.0.orig/cmake/developer_package/packaging/archive.cmake
++++ openvino-2024.0.0/cmake/developer_package/packaging/archive.cmake
+@@ -21,15 +21,19 @@ endif()
+ macro(ov_archive_cpack_set_dirs)
+     # common "archive" package locations
+     # TODO: move current variables to OpenVINO specific locations
+-    set(OV_CPACK_INCLUDEDIR runtime/include)
+-    set(OV_CPACK_OPENVINO_CMAKEDIR runtime/cmake)
+-    set(OV_CPACK_DOCDIR docs)
+-    set(OV_CPACK_LICENSESDIR licenses)
+-    set(OV_CPACK_SAMPLESDIR samples)
+-    set(OV_CPACK_WHEELSDIR tools)
+-    set(OV_CPACK_TOOLSDIR tools)
+-    set(OV_CPACK_DEVREQDIR tools)
+-    set(OV_CPACK_PYTHONDIR python)
++    set(OV_CPACK_INCLUDEDIR include)
++    set(OV_CPACK_OPENVINO_CMAKEDIR ${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME})
++    set(OV_CPACK_DOCDIR ${CMAKE_INSTALL_DOCDIR})
++    set(OV_CPACK_LICENSESDIR ${CMAKE_INSTALL_DATAROOTDIR}/licenses/${PROJECT_NAME})
++    set(OV_CPACK_SAMPLESDIR ${CMAKE_INSTALL_DATAROOTDIR}/${PROJECT_NAME}/samples)
++    if (ENABLE_PYTHON)
++        find_package(Python3 QUIET COMPONENTS Interpreter)
++        file(RELATIVE_PATH OV_PYTHON_MODPATH ${CMAKE_INSTALL_PREFIX} ${Python3_SITEARCH})
++        set(OV_CPACK_WHEELSDIR tools)
++        set(OV_CPACK_TOOLSDIR tools)
++        set(OV_CPACK_DEVREQDIR tools)
++        set(OV_CPACK_PYTHONDIR ${OV_PYTHON_MODPATH})
++    endif()
+ 
+     if(USE_BUILD_TYPE_SUBFOLDER)
+         set(build_type ${CMAKE_BUILD_TYPE})
+@@ -46,11 +50,11 @@ macro(ov_archive_cpack_set_dirs)
+         set(OV_CPACK_RUNTIMEDIR runtime/lib/${ARCH_FOLDER}/${build_type})
+         set(OV_CPACK_ARCHIVEDIR runtime/lib/${ARCH_FOLDER}/${build_type})
+     else()
+-        set(OV_CPACK_LIBRARYDIR runtime/lib/${ARCH_FOLDER})
+-        set(OV_CPACK_RUNTIMEDIR runtime/lib/${ARCH_FOLDER})
+-        set(OV_CPACK_ARCHIVEDIR runtime/lib/${ARCH_FOLDER})
++        set(OV_CPACK_LIBRARYDIR ${CMAKE_INSTALL_LIBDIR})
++        set(OV_CPACK_RUNTIMEDIR ${CMAKE_INSTALL_LIBDIR})
++        set(OV_CPACK_ARCHIVEDIR ${CMAKE_INSTALL_LIBDIR})
+     endif()
+-    set(OV_CPACK_PLUGINSDIR ${OV_CPACK_RUNTIMEDIR})
++    set(OV_CPACK_PLUGINSDIR ${OV_CPACK_RUNTIMEDIR}/${PROJECT_NAME})
+ endmacro()
+ 
+ ov_archive_cpack_set_dirs()
+Index: openvino-2024.0.0/src/cmake/openvino.cmake
+===================================================================
+--- openvino-2024.0.0.orig/src/cmake/openvino.cmake
++++ openvino-2024.0.0/src/cmake/openvino.cmake
+@@ -254,6 +254,7 @@ if(ENABLE_PKGCONFIG_GEN)
+ 
+     # define relative paths
+     file(RELATIVE_PATH PKGCONFIG_OpenVINO_PREFIX "/${OV_CPACK_RUNTIMEDIR}/pkgconfig" "/")
++    cmake_path(NORMAL_PATH PKGCONFIG_OpenVINO_PREFIX)
+ 
+     set(pkgconfig_in "${OpenVINO_SOURCE_DIR}/cmake/templates/openvino.pc.in")
+     if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.20 AND OV_GENERATOR_MULTI_CONFIG)
diff --git a/openvino-onnx-ml-defines.patch b/openvino-onnx-ml-defines.patch
new file mode 100644
index 0000000..1ab47eb
--- /dev/null
+++ b/openvino-onnx-ml-defines.patch
@@ -0,0 +1,12 @@
+Index: openvino-2024.0.0/thirdparty/dependencies.cmake
+===================================================================
+--- openvino-2024.0.0.orig/thirdparty/dependencies.cmake
++++ openvino-2024.0.0/thirdparty/dependencies.cmake
+@@ -482,6 +482,7 @@ if(ENABLE_OV_ONNX_FRONTEND)
+ 
+     if(ONNX_FOUND)
+         # conan and vcpkg create imported targets 'onnx' and 'onnx_proto'
++        add_compile_definitions(ONNX_ML=1)
+     else()
+         add_subdirectory(thirdparty/onnx)
+     endif()
diff --git a/openvino-remove-npu-compile-tool.patch b/openvino-remove-npu-compile-tool.patch
new file mode 100644
index 0000000..4b4f3e0
--- /dev/null
+++ b/openvino-remove-npu-compile-tool.patch
@@ -0,0 +1,28 @@
+diff -uNr openvino.orig/src/plugins/intel_npu/tools/CMakeLists.txt openvino/src/plugins/intel_npu/tools/CMakeLists.txt
+--- openvino.orig/src/plugins/intel_npu/tools/CMakeLists.txt	2024-08-02 23:32:03.216982353 -0300
++++ openvino/src/plugins/intel_npu/tools/CMakeLists.txt	2024-08-04 17:22:22.899469769 -0300
+@@ -4,5 +4,4 @@
+ #
+ 
+ add_subdirectory(common)
+-add_subdirectory(compile_tool)
+ add_subdirectory(single-image-test)
+diff -uNr openvino.orig/src/plugins/intel_npu/tools/compile_tool/CMakeLists.txt openvino/src/plugins/intel_npu/tools/compile_tool/CMakeLists.txt
+--- openvino.orig/src/plugins/intel_npu/tools/compile_tool/CMakeLists.txt	2024-08-02 23:32:03.216982353 -0300
++++ openvino/src/plugins/intel_npu/tools/compile_tool/CMakeLists.txt	2024-08-03 02:36:25.059440300 -0300
+@@ -44,13 +44,13 @@
+ #
+ 
+ install(TARGETS ${TARGET_NAME}
+-        RUNTIME DESTINATION "tools/${TARGET_NAME}"
++        RUNTIME DESTINATION "share/OpenVINO/tools/${TARGET_NAME}"
+         COMPONENT ${NPU_INTERNAL_COMPONENT}
+         ${OV_CPACK_COMP_NPU_INTERNAL_EXCLUDE_ALL})
+ 
+ if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/README.md")
+     install(FILES "${CMAKE_CURRENT_SOURCE_DIR}/README.md"
+-            DESTINATION "tools/${TARGET_NAME}"
++            DESTINATION "share/OpenVINO/tools/${TARGET_NAME}"
+             COMPONENT ${NPU_INTERNAL_COMPONENT}
+             ${OV_CPACK_COMP_NPU_INTERNAL_EXCLUDE_ALL})
+ endif()
diff --git a/openvino-rpmlintrc b/openvino-rpmlintrc
new file mode 100644
index 0000000..4ca21df
--- /dev/null
+++ b/openvino-rpmlintrc
@@ -0,0 +1,4 @@
+addFilter("openvino-sample.*: E: devel-file-in-non-devel-package")
+
+# These files are part of samples, meant for the user to copy and re-use, so env based hashbangs are preferred
+addFilter("openvino-sample.*: E: env-script-interpreter")
diff --git a/openvino.changes b/openvino.changes
new file mode 100644
index 0000000..c5da03c
--- /dev/null
+++ b/openvino.changes
@@ -0,0 +1,331 @@
+-------------------------------------------------------------------
+Sat Aug 10 01:41:06 UTC 2024 - Alessandro de Oliveira Faria <cabelo@opensuse.org>
+
+- Update to 2024.3.0
+- Summary of major features and improvements
+  * More Gen AI coverage and framework integrations to minimize
+    code changes
+    + OpenVINO pre-optimized models are now available in Hugging
+      Face, making it easier for developers to get started with
+      these models.
+  * Broader Large Language Model (LLM) support and more model
+    compression techniques.
+    + Significant improvement in LLM performance on Intel
+      discrete GPUs with the addition of Multi-Head Attention
+      (MHA) and OneDNN enhancements.
+  * More portability and performance to run AI at the edge, in the
+    cloud, or locally.
+    + Improved CPU performance when serving LLMs with the
+      inclusion of vLLM and continuous batching in the OpenVINO
+      Model Server (OVMS). vLLM is an easy-to-use open-source
+      library that supports efficient LLM inferencing and model
+      serving.
+    + Ubuntu 24.04 long-term support (LTS), 64-bit (Kernel 6.8+)
+      (preview support)
+- Support Change and Deprecation Notices
+  * Using deprecated features and components is not advised.
+    They are available to enable a smooth transition to new
+    solutions and will be discontinued in the future. To keep
+    using discontinued features, you will have to revert to the
+    last LTS OpenVINO version supporting them. For more details,
+    refer to the OpenVINO Legacy Features and Components page.
+  * Discontinued in 2024.0:
+    + Runtime components:
+      - Intel® Gaussian & Neural Accelerator (Intel® GNA).
+        Consider using the Neural Processing Unit (NPU) for
+        low-powered systems like Intel® Core™ Ultra or 14th
+        generation and beyond.
+      - OpenVINO C++/C/Python 1.0 APIs (see 2023.3 API transition
+        guide for reference).
+      - All ONNX Frontend legacy API (known as ONNX_IMPORTER_API)
+      - 'PerformanceMode.UNDEFINED' property as part of the OpenVINO
+        Python API
+    + Tools:
+      - Deployment Manager. See installation and deployment guides
+        for current distribution options.
+      - Accuracy Checker.
+      - Post-Training Optimization Tool (POT). Neural Network
+        Compression Framework (NNCF) should be used instead.
+      - A Git patch for NNCF integration with huggingface/
+        transformers. The recommended approach is to use
+        huggingface/optimum-intel for applying NNCF optimization
+        on top of models from Hugging Face.
+      - Support for Apache MXNet, Caffe, and Kaldi model formats.
+        Conversion to ONNX may be used as a solution.
+  * Deprecated and to be removed in the future:
+    + The OpenVINO™ Development Tools package (pip install
+      openvino-dev) will be removed from installation options
+      and distribution channels beginning with OpenVINO 2025.0.
+    + Model Optimizer will be discontinued with OpenVINO 2025.0.
+      Consider using the new conversion methods instead. For
+      more details, see the model conversion transition guide.
+    + OpenVINO property Affinity API will be discontinued with
+      OpenVINO 2025.0. It will be replaced with CPU binding
+      configurations (ov::hint::enable_cpu_pinning).
+    + OpenVINO Model Server components:
+      - “auto shape” and “auto batch size” (reshaping a model
+        in runtime) will be removed in the future. OpenVINO’s
+        dynamic shape models are recommended instead.
+    + A number of notebooks have been deprecated. For an
+      up-to-date listing of available notebooks, refer to
+      the OpenVINO™ Notebook index (openvinotoolkit.github.io).
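A minimal C++ sketch of the CPU-binding hint that replaces the deprecated
Affinity API mentioned above (hedged: the model path is a placeholder and
error handling is omitted):

    #include <openvino/openvino.hpp>

    int main() {
        ov::Core core;
        // Placeholder IR model path, for illustration only.
        auto model = core.read_model("model.xml");
        // Successor to the deprecated ov::affinity property: ask the
        // CPU plugin to pin inference threads to physical cores.
        ov::CompiledModel compiled = core.compile_model(
            model, "CPU", ov::hint::enable_cpu_pinning(true));
        return 0;
    }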
+
+-------------------------------------------------------------------
+Sat Jun 22 12:01:23 UTC 2024 - Andreas Schwab <schwab@suse.de>
+
+- Add riscv-cpu-plugin subpackage
+
+-------------------------------------------------------------------
+Wed Jun 19 21:36:01 UTC 2024 - Alessandro de Oliveira Faria <cabelo@opensuse.org>
+
+- Update to 2024.2.0
+- More Gen AI coverage and framework integrations to minimize code
+  changes
+  * Llama 3 optimizations for CPUs, built-in GPUs, and discrete
+    GPUs for improved performance and efficient memory usage.
+  * Support for Phi-3-mini, a family of AI models that leverages
+    the power of small language models for faster, more accurate
+    and cost-effective text processing.
+  * Python Custom Operation is now enabled in OpenVINO, making it
+    easier for Python developers to code their custom operations
+    instead of using C++ custom operations (also supported).
+    Python Custom Operation empowers users to implement their own
+    specialized operations into any model.
+  * Notebooks expansion to ensure better coverage for new models.
+    Noteworthy notebooks added: DynamiCrafter, YOLOv10, Chatbot
+    notebook with Phi-3, and QWEN2.
+- Broader Large Language Model (LLM) support and more model
+  compression techniques.
+  * GPTQ method for 4-bit weight compression added to NNCF for
+    more efficient inference and improved performance of
+    compressed LLMs.
+  * Significant LLM performance improvements and reduced latency
+    for both built-in GPUs and discrete GPUs.
+  * Significant improvement in 2nd token latency and memory
+    footprint of FP16 weight LLMs on AVX2 (13th Gen Intel® Core™
+    processors) and AVX512 (3rd Gen Intel® Xeon® Scalable
+    Processors) based CPU platforms, particularly for small
+    batch sizes.
+- More portability and performance to run AI at the edge, in the
+  cloud, or locally.
+  * Model Serving Enhancements:
+  * Preview: OpenVINO Model Server (OVMS) now supports
+    OpenAI-compatible API along with Continuous Batching and
+    PagedAttention, enabling significantly higher throughput
+    for parallel inferencing, especially on Intel® Xeon®
+    processors, when serving LLMs to many concurrent users.
+  * OpenVINO backend for Triton Server now supports built-in
+    GPUs and discrete GPUs, in addition to dynamic
+    shapes support.
+  * Integration of TorchServe through torch.compile OpenVINO
+    backend for easy model deployment, provisioning to
+    multiple instances, model versioning, and maintenance.
+  * Preview: addition of the Generate API, a simplified API
+    for text generation using large language models with only
+    a few lines of code. The API is available through the newly
+    launched OpenVINO GenAI package.
+  * Support for Intel Atom® Processor X Series. For more details,
+    see System Requirements.
+  * Preview: Support for Intel® Xeon® 6 processor.
+- Support Change and Deprecation Notices
+  * Using deprecated features and components is not advised.
+    They are available to enable a smooth transition to new
+    solutions and will be discontinued in the future.
+    To keep using discontinued features, you will have to revert
+    to the last LTS OpenVINO version supporting them. For more
+    details, refer to the OpenVINO Legacy Features and
+    Components page.
+  * Discontinued in 2024.0:
+    + Runtime components:
+      - Intel® Gaussian & Neural Accelerator (Intel® GNA).
+        Consider using the Neural Processing Unit (NPU) for
+        low-powered systems like Intel® Core™ Ultra or 14th
+        generation and beyond.
+      - OpenVINO C++/C/Python 1.0 APIs (see 2023.3 API
+        transition guide for reference).
+      - All ONNX Frontend legacy API (known as ONNX_IMPORTER_API)
+      - 'PerformanceMode.UNDEFINED' property as part of the
+        OpenVINO Python API
+    + Tools:
+      - Deployment Manager. See installation and deployment
+        guides for current distribution options.
+      - Accuracy Checker.
+      - Post-Training Optimization Tool (POT). Neural Network
+        Compression Framework (NNCF) should be used instead.
+      - A Git patch for NNCF integration with
+        huggingface/transformers. The recommended approach
+        is to use huggingface/optimum-intel for applying NNCF
+        optimization on top of models from Hugging Face.
+      - Support for Apache MXNet, Caffe, and Kaldi model formats.
+        Conversion to ONNX may be used as a solution.
+  * Deprecated and to be removed in the future:
+    + The OpenVINO™ Development Tools package (pip install
+      openvino-dev) will be removed from installation options
+      and distribution channels beginning with OpenVINO 2025.0.
+    + Model Optimizer will be discontinued with OpenVINO 2025.0.
+      Consider using the new conversion methods instead. For
+      more details, see the model conversion transition guide.
+    + OpenVINO property Affinity API will be discontinued with
+      OpenVINO 2025.0. It will be replaced with CPU binding
+      configurations (ov::hint::enable_cpu_pinning).
+    + OpenVINO Model Server components:
+      - “auto shape” and “auto batch size” (reshaping a model in
+        runtime) will be removed in the future. OpenVINO’s dynamic
+        shape models are recommended instead.
+    + A number of notebooks have been deprecated. For an
+      up-to-date listing of available notebooks, refer to the
+      OpenVINO™ Notebook index (openvinotoolkit.github.io).
+
+-------------------------------------------------------------------
+Thu May  9 22:56:53 UTC 2024 - Alessandro de Oliveira Faria <cabelo@opensuse.org>
+
+- Fix sample source path in build script:
+  * openvino-fix-build-sample-path.patch
+- Update to 2024.1.0
+- More Generative AI coverage and framework integrations to
+  minimize code changes.
+  * Mixtral and URLNet models optimized for performance
+    improvements on Intel® Xeon® processors.
+  * Stable Diffusion 1.5, ChatGLM3-6B, and Qwen-7B models
+    optimized for improved inference speed on Intel® Core™
+    Ultra processors with integrated GPU.
+  * Support for Falcon-7B-Instruct, a GenAI Large Language Model
+    (LLM) ready-to-use chat/instruct model with superior
+    performance metrics.
+  * New Jupyter Notebooks added: YOLO V9, YOLO V8
+    Oriented Bounding Boxes Detection (OBB), Stable Diffusion
+    in Keras, MobileCLIP, RMBG-v1.4 Background Removal, Magika,
+    TripoSR, AnimateAnyone, LLaVA-Next, and RAG system with
+    OpenVINO and LangChain.
+- Broader Large Language Model (LLM) support and more model
+  compression techniques.
+  * LLM compilation time reduced through additional optimizations
+    with compressed embedding. Improved 1st token performance of
+    LLMs on 4th and 5th generations of Intel® Xeon® processors
+    with Intel® Advanced Matrix Extensions (Intel® AMX).
+  * Better LLM compression and improved performance with oneDNN,
+    INT4, and INT8 support for Intel® Arc™ GPUs.
+  * Significant memory reduction for select smaller GenAI
+    models on Intel® Core™ Ultra processors with integrated GPU.
+- More portability and performance to run AI at the edge,
+  in the cloud, or locally.
+  * The preview NPU plugin for Intel® Core™ Ultra processors
+    is now available in the OpenVINO open-source GitHub
+    repository, in addition to the main OpenVINO package on PyPI.
+  * The JavaScript API is now more easily accessible through
+    the npm repository, enabling JavaScript developers’ seamless
+    access to the OpenVINO API.
+  * FP16 inference on ARM processors now enabled for the
+    Convolutional Neural Network (CNN) by default.
+- Support Change and Deprecation Notices
+  * Using deprecated features and components is not advised. They
+    are available to enable a smooth transition to new solutions
+    and will be discontinued in the future. To keep using
+    discontinued features, you will have to revert to the last
+    LTS OpenVINO version supporting them.
+  * For more details, refer to the OpenVINO Legacy Features
+    and Components page.
+  * Discontinued in 2024.0:
+    + Runtime components:
+      - Intel® Gaussian & Neural Accelerator (Intel® GNA).
+        Consider using the Neural Processing Unit (NPU)
+        for low-powered systems like Intel® Core™ Ultra or
+        14th generation and beyond.
+      - OpenVINO C++/C/Python 1.0 APIs (see 2023.3 API
+        transition guide for reference).
+      - All ONNX Frontend legacy API (known as
+        ONNX_IMPORTER_API)
+      - 'PerformanceMode.UNDEFINED' property as part of
+        the OpenVINO Python API
+    + Tools:
+      - Deployment Manager. See installation and deployment
+        guides for current distribution options.
+      - Accuracy Checker.
+      - Post-Training Optimization Tool (POT). Neural Network
+        Compression Framework (NNCF) should be used instead.
+      - A Git patch for NNCF integration with
+        huggingface/transformers. The recommended approach
+        is to use huggingface/optimum-intel for applying
+        NNCF optimization on top of models from Hugging
+        Face.
+      - Support for Apache MXNet, Caffe, and Kaldi model
+        formats. Conversion to ONNX may be used as
+        a solution.
+  * Deprecated and to be removed in the future:
+    + The OpenVINO™ Development Tools package (pip install
+      openvino-dev) will be removed from installation options
+      and distribution channels beginning with OpenVINO 2025.0.
+    + Model Optimizer will be discontinued with OpenVINO 2025.0.
+      Consider using the new conversion methods instead. For
+      more details, see the model conversion transition guide.
+    + OpenVINO property Affinity API will be discontinued with
+      OpenVINO 2025.0. It will be replaced with CPU binding
+      configurations (ov::hint::enable_cpu_pinning).
+    + OpenVINO Model Server components:
+      - “auto shape” and “auto batch size” (reshaping a model
+        in runtime) will be removed in the future. OpenVINO’s
+        dynamic shape models are recommended instead.
+
+-------------------------------------------------------------------
+Tue Apr 23 18:57:17 UTC 2024 - Atri Bhattacharya <badshah400@gmail.com>
+
+- License update: play safe and list all third party licenses as
+  part of the License tag.
+
+-------------------------------------------------------------------
+Tue Apr 23 12:42:32 UTC 2024 - Atri Bhattacharya <badshah400@gmail.com>
+
+- Switch to _service file as tagged Source tarball does not
+  include `./thirdparty` submodules.
+- Update openvino-fix-install-paths.patch to fix python module
+  install path.
+- Enable python module and split it out into a python subpackage
+  (for now default python3 only).
+- Explicitly build python metadata (dist-info) and install it
+  (needs simple sed hackery to support "officially" unsupported
+  platform ppc64le).
+- Specify ENABLE_JS=OFF to turn off javascript bindings as
+  building these requires downloading npm stuff from the network.
+- Build with system pybind11.
+- Bump _constraints for updated disk space requirements.
+- Drop empty %check section, rpmlint was misleading when it
+  recommended adding this.
+
+-------------------------------------------------------------------
+Fri Apr 19 08:08:02 UTC 2024 - Atri Bhattacharya <badshah400@gmail.com>
+
+- Numerous specfile cleanups:
+  * Drop redundant `mv` commands and use `install` where
+    appropriate.
+  * Build with system protobuf.
+  * Fix Summary tags.
+  * Trim package descriptions.
+  * Drop forcing CMAKE_BUILD_TYPE=Release, let macro default
+    RelWithDebInfo be used instead.
+  * Correct naming of shared library packages.
+  * Separate out libopenvino_c.so.* into own shared lib package.
+  * Drop rpmlintrc rule used to hide shlib naming mistakes.
+  * Rename Source tarball to %{name}-%{version}.EXT pattern.
+  * Use ldconfig_scriptlet macro for post(un).
+- Add openvino-onnx-ml-defines.patch -- Define ONNX_ML at compile
+  time when using system onnx to allow using 'onnx-ml.pb.h'
+  instead of 'onnx.pb.h', the latter not being shipped with
+  openSUSE's onnx-devel package (gh#onnx/onnx#3074).
+- Add openvino-fix-install-paths.patch: Change hard-coded install
+  paths in upstream cmake macro to standard Linux dirs.
+- Add openvino-ComputeLibrary-include-string.patch: Include header
+  for std::string.
+- Add external devel packages as Requires for openvino-devel.
+- Pass -Wl,-z,noexecstack to %build_ldflags to avoid an exec stack
+  issue with Intel CPU plugin.
+- Use ninja for build.
+- Adapt _constraints file for correct disk space and memory
+  requirements.
+- Add empty %check section.
+
+-------------------------------------------------------------------
+Mon Apr 15 03:18:33 UTC 2024 - Alessandro de Oliveira Faria <cabelo@opensuse.org>
+
+- Initial package
+- Version 2024.0.0
+- Add openvino-rpmlintrc.
diff --git a/openvino.obsinfo b/openvino.obsinfo
new file mode 100644
index 0000000..cc26d08
--- /dev/null
+++ b/openvino.obsinfo
@@ -0,0 +1,4 @@
+name: openvino
+version: 2024.3.0
+mtime: 1721394417
+commit: 1e3b88e4e3f89774923e04e845428579f8ffa0fe
diff --git a/openvino.spec b/openvino.spec
new file mode 100644
index 0000000..b276196
--- /dev/null
+++ b/openvino.spec
@@ -0,0 +1,475 @@
+#
+# spec file for package openvino
+#
+# Copyright (c) 2024 SUSE LLC
+# Copyright (c) 2024 Alessandro de Oliveira Faria (A.K.A. CABELO) <cabelo@opensuse.org> or <alessandro.faria@owasp.org>
+#
+# All modifications and additions to the file contributed by third parties
+# remain the property of their copyright owners, unless otherwise agreed
+# upon. The license for this file, and modifications and additions to the
+# file, is the same license as for the pristine package itself (unless the
+# license for the pristine package is not an Open Source License, in which
+# case the license is the MIT License). An "Open Source License" is a
+# license that conforms to the Open Source Definition (Version 1.9)
+# published by the Open Source Initiative.
+
+# Please submit bugfixes or comments via https://bugs.opensuse.org/
+#
+
+
+# Note: Will not build on Leap:15.X on account of too old TBB
+# Compilation takes ~1 hr on OBS for a single python, don't try all supported flavours
+%define pythons python3
+%define __builder ninja
+%define so_ver 2430
+%define shlib lib%{name}%{so_ver}
+%define shlib_c lib%{name}_c%{so_ver}
+%define prj_name OpenVINO
+
+Name:           openvino
+Version:        2024.3.0
+Release:        0
+Summary:        A toolkit for optimizing and deploying AI inference
+# Let's be safe and put all third party licenses here, no matter that we use specific thirdparty libs or not
+License:        Apache-2.0 AND BSD-2-Clause AND BSD-3-Clause AND HPND AND JSON AND MIT AND OFL-1.1 AND Zlib
+URL:            https://github.com/openvinotoolkit/openvino
+Source0:        %{name}-%{version}.tar.zst
+Source1:        %{name}-rpmlintrc
+# PATCH-FEATURE-OPENSUSE openvino-onnx-ml-defines.patch badshah400@gmail.com -- Define ONNX_ML at compile time when using system onnx to allow using 'onnx-ml.pb.h' instead of 'onnx.pb.h', the latter not being shipped with openSUSE's onnx-devel package
+Patch0:         openvino-onnx-ml-defines.patch
+# PATCH-FEATURE-OPENSUSE openvino-fix-install-paths.patch badshah400@gmail.com -- Fix installation paths hardcoded into upstream defined cmake macros
+Patch2:         openvino-fix-install-paths.patch
+# PATCH-FIX-UPSTREAM openvino-ComputeLibrary-include-string.patch badshah400@gmail.com -- Include header for std::string
+Patch3:         openvino-ComputeLibrary-include-string.patch
+# PATCH-FIX-UPSTREAM openvino-fix-build-sample-path.patch cabelo@opensuse.org -- Fix sample source path in build script
+Patch4:         openvino-fix-build-sample-path.patch
+# PATCH-FIX-UPSTREAM openvino-remove-npu-compile-tool.patch cabelo@opensuse.org -- Remove NPU Compile Tool
+Patch5:         openvino-remove-npu-compile-tool.patch
+
+BuildRequires:  ade-devel
+BuildRequires:  cmake
+BuildRequires:  fdupes
+BuildRequires:  gcc-c++
+BuildRequires:  ninja
+BuildRequires:  opencl-cpp-headers
+# FIXME: /usr/include/onnx/onnx-ml.pb.h:17:2: error: This file was generated by
+# an older version of protoc which is incompatible with your Protocol Buffer
+# headers. Please regenerate this file with a newer version of protoc.
+#BuildRequires:  cmake(ONNX)
+BuildRequires:  pkgconfig
+BuildRequires:  %{python_module devel}
+BuildRequires:  %{python_module pip}
+BuildRequires:  %{python_module pybind11-devel}
+BuildRequires:  %{python_module setuptools}
+BuildRequires:  %{python_module wheel}
+BuildRequires:  python-rpm-macros
+BuildRequires:  zstd
+BuildRequires:  pkgconfig(OpenCL-Headers)
+BuildRequires:  pkgconfig(flatbuffers)
+BuildRequires:  pkgconfig(libva)
+BuildRequires:  pkgconfig(nlohmann_json)
+BuildRequires:  pkgconfig(ocl-icd)
+BuildRequires:  pkgconfig(protobuf)
+BuildRequires:  pkgconfig(pugixml)
+BuildRequires:  pkgconfig(snappy)
+BuildRequires:  pkgconfig(tbb)
+BuildRequires:  pkgconfig(zlib)
+%ifarch %{arm64}
+BuildRequires:  scons
+%endif
+# No 32-bit support
+ExcludeArch:    %{ix86} %{arm32} ppc
+%define python_subpackage_only 1
+%python_subpackages

+%description
+OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
+
+
+## Main shared libs and devel pkg ##
+#
+
+%package -n %{shlib}
+Summary:        Shared library for OpenVINO toolkit
+
+%description -n %{shlib}
+OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
+
+This package provides the shared library for OpenVINO.
+
+
+#
+
+%package -n %{shlib_c}
+Summary:        Shared C library for OpenVINO toolkit
+
+%description -n %{shlib_c}
+This package provides the C library for OpenVINO.
+
+
+#
+
+%package -n %{name}-devel
+Summary:        Headers and sources for OpenVINO toolkit
+Requires:       %{shlib_c} = %{version}
+Requires:       %{shlib} = %{version}
+Requires:       lib%{name}_ir_frontend%{so_ver} = %{version}
+Requires:       lib%{name}_onnx_frontend%{so_ver} = %{version}
+Requires:       lib%{name}_paddle_frontend%{so_ver} = %{version}
+Requires:       lib%{name}_pytorch_frontend%{so_ver} = %{version}
+Requires:       lib%{name}_tensorflow_frontend%{so_ver} = %{version}
+Requires:       lib%{name}_tensorflow_lite_frontend%{so_ver} = %{version}
+Requires:       pkgconfig(OpenCL-Headers)
+Requires:       pkgconfig(flatbuffers)
+Requires:       pkgconfig(libva)
+Requires:       pkgconfig(nlohmann_json)
+Requires:       pkgconfig(ocl-icd)
+Requires:       pkgconfig(protobuf)
+Requires:       pkgconfig(pugixml)
+Requires:       pkgconfig(snappy)
+Requires:       pkgconfig(tbb)
+Recommends:     %{name}-auto-batch-plugin = %{version}
+Recommends:     %{name}-auto-plugin = %{version}
+Recommends:     %{name}-hetero-plugin = %{version}
+Recommends:     %{name}-intel-cpu-plugin = %{version}
+%ifarch riscv64
+Recommends:     %{name}-riscv-cpu-plugin = %{version}
+%endif
+
+%description -n %{name}-devel
+OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
+
+This package provides the headers and sources for developing applications with
+OpenVINO.
+
+
+## Plugins ##
+#
+
+%package -n %{name}-arm-cpu-plugin
+Summary:        ARM CPU plugin for OpenVINO toolkit
+
+%description -n %{name}-arm-cpu-plugin
+OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
+
+This package provides the ARM CPU plugin for OpenVINO on %{arm64} archs.
+
+
+#
+%package -n %{name}-riscv-cpu-plugin
+Summary:        RISC-V CPU plugin for OpenVINO toolkit
+
+%description -n %{name}-riscv-cpu-plugin
+OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
+
+This package provides the RISC-V CPU plugin for OpenVINO on riscv64 archs.
+
+
+#
+%package -n %{name}-auto-plugin
+Summary:        Auto / Multi software plugin for OpenVINO toolkit
+
+%description -n %{name}-auto-plugin
+OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
+
+This package provides the Auto / Multi software plugin for OpenVINO.
+
+
+#
+
+%package -n %{name}-auto-batch-plugin
+Summary:        Automatic batch software plugin for OpenVINO toolkit
+
+%description -n %{name}-auto-batch-plugin
+OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
+
+This package provides the automatic batch software plugin for OpenVINO.
+
+
+#
+
+%package -n %{name}-hetero-plugin
+Summary:        Hetero frontend for OpenVINO toolkit
+
+%description -n %{name}-hetero-plugin
+OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
+
+This package provides the hetero frontend for OpenVINO.
+
+
+#
+
+%package -n %{name}-intel-cpu-plugin
+Summary:        Intel CPU plugin for OpenVINO toolkit
+
+%description -n %{name}-intel-cpu-plugin
+OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
+
+This package provides the Intel CPU plugin for OpenVINO for %{x86_64} archs.
+
+
+#
+
+%package -n %{name}-intel-npu-plugin
+Summary:        Intel NPU plugin for OpenVINO toolkit
+
+%description -n %{name}-intel-npu-plugin
+OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
+
+This package provides the Intel NPU plugin for OpenVINO for %{x86_64} archs.
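The plugin subpackages above are selected at run time by device name rather
than linked against; a short sketch of how an application reaches them
(hedged: the model path is a placeholder):

    #include <openvino/openvino.hpp>

    int main() {
        ov::Core core;
        auto model = core.read_model("model.xml");  // placeholder path
        // "AUTO" defers device choice to the auto plugin; "HETERO:CPU"
        // would instead split the graph through the hetero plugin.
        auto compiled = core.compile_model(model, "AUTO");
        return 0;
    }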
+
+
+
+## Frontend shared libs ##
+#
+
+%package -n lib%{name}_ir_frontend%{so_ver}
+Summary:        IR frontend for OpenVINO toolkit
+
+%description -n lib%{name}_ir_frontend%{so_ver}
+OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
+
+This package provides the IR frontend for OpenVINO.
+
+
+#
+
+%package -n lib%{name}_onnx_frontend%{so_ver}
+Summary:        ONNX frontend for OpenVINO toolkit
+
+%description -n lib%{name}_onnx_frontend%{so_ver}
+OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
+
+This package provides the ONNX frontend for OpenVINO.
+
+
+#
+
+%package -n lib%{name}_paddle_frontend%{so_ver}
+Summary:        Paddle frontend for OpenVINO toolkit
+
+%description -n lib%{name}_paddle_frontend%{so_ver}
+OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
+
+This package provides the Paddle frontend for OpenVINO.
+
+
+#
+
+%package -n lib%{name}_pytorch_frontend%{so_ver}
+Summary:        PyTorch frontend for OpenVINO toolkit
+
+%description -n lib%{name}_pytorch_frontend%{so_ver}
+OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
+
+This package provides the PyTorch frontend for OpenVINO.
+
+
+#
+
+%package -n lib%{name}_tensorflow_frontend%{so_ver}
+Summary:        TensorFlow frontend for OpenVINO toolkit
+
+%description -n lib%{name}_tensorflow_frontend%{so_ver}
+OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
+
+This package provides the TensorFlow frontend for OpenVINO.
+
+
+#
+
+%package -n lib%{name}_tensorflow_lite_frontend%{so_ver}
+Summary:        TensorFlow Lite frontend for OpenVINO toolkit
+
+%description -n lib%{name}_tensorflow_lite_frontend%{so_ver}
+OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
+
+This package provides the TensorFlow Lite frontend for OpenVINO.
+
+
+## Python module ##
+#
+
+%package -n python-openvino
+Summary:        Python module for OpenVINO toolkit
+Requires:       python-numpy < 2
+Requires:       python-openvino-telemetry
+
+%description -n python-openvino
+OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
+
+This package provides a Python module for interfacing with OpenVINO toolkit.
+
+
+## Samples/examples ##
+#
+
+%package -n %{name}-sample
+Summary:        Samples for use with OpenVINO toolkit
+BuildArch:      noarch
+
+%description -n %{name}-sample
+OpenVINO is an open-source toolkit for optimizing and deploying AI inference.
+
+This package provides some samples for use with OpenVINO.
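As a quick smoke test that the devel package, pkgconfig file, and plugin
packages fit together, a hedged sketch (file name is illustrative):

    // hello_devices.cpp -- build with:
    //   g++ hello_devices.cpp $(pkg-config --cflags --libs openvino)
    #include <openvino/openvino.hpp>
    #include <iostream>

    int main() {
        ov::Core core;  // discovers installed plugins at run time
        for (const auto& dev : core.get_available_devices())
            std::cout << dev << '\n';  // e.g. "CPU"
        return 0;
    }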
+
+
+#
+%prep
+%autosetup -p1
+
+%build
+# Otherwise intel_cpu plugin declares an executable stack
+%ifarch %{x86_64}
+%define build_ldflags -Wl,-z,noexecstack
+%endif
+%cmake \
+    -DCMAKE_CXX_STANDARD=17 \
+    -DBUILD_SHARED_LIBS=ON \
+    -DENABLE_OV_ONNX_FRONTEND=ON \
+    -DENABLE_OV_PADDLE_FRONTEND=ON \
+    -DENABLE_OV_PYTORCH_FRONTEND=ON \
+    -DENABLE_OV_IR_FRONTEND=ON \
+    -DENABLE_OV_TF_FRONTEND=ON \
+    -DENABLE_OV_TF_LITE_FRONTEND=ON \
+    -DENABLE_INTEL_GPU=OFF \
+    -DENABLE_JS=OFF \
+    -DENABLE_PYTHON=ON \
+    -DENABLE_WHEEL=OFF \
+    -DENABLE_SYSTEM_OPENCL=ON \
+    -DENABLE_SYSTEM_PROTOBUF=ON \
+    -DENABLE_SYSTEM_PUGIXML=ON \
+    -DENABLE_SYSTEM_SNAPPY=ON \
+    -DENABLE_SYSTEM_TBB=ON \
+    -DONNX_USE_PROTOBUF_SHARED_LIBS=ON \
+    -DProtobuf_USE_STATIC_LIBS=OFF \
+    %{nil}
+%cmake_build
+# Manually generate dist-info dir
+export WHEEL_VERSION=%{version} \
+       BUILD_TYPE=RelWithDebInfo
+%ifarch %{power64}
+# Manual hackery for power64 because it is not "officially" supported
+sed -i "s/{ARCH}/%{_arch}/" ../src/bindings/python/wheel/setup.py
+%endif
+%python_exec ../src/bindings/python/wheel/setup.py dist_info -o ../
+
+%install
+%cmake_install
+
+rm %{buildroot}%{_datadir}/%{prj_name}/samples/cpp/thirdparty/nlohmann_json/.cirrus.yml
+
+# Hash-bangs in non-exec python sample scripts
+sed -Ei "1{\@/usr/bin/env@d}" \
+    %{buildroot}%{_datadir}/%{prj_name}/samples/python/benchmark/bert_benchmark/bert_benchmark.py \
+    %{buildroot}%{_datadir}/%{prj_name}/samples/python/benchmark/sync_benchmark/sync_benchmark.py \
+    %{buildroot}%{_datadir}/%{prj_name}/samples/python/benchmark/throughput_benchmark/throughput_benchmark.py \
+    %{buildroot}%{_datadir}/%{prj_name}/samples/python/classification_sample_async/classification_sample_async.py \
+    %{buildroot}%{_datadir}/%{prj_name}/samples/python/hello_classification/hello_classification.py \
+    %{buildroot}%{_datadir}/%{prj_name}/samples/python/hello_query_device/hello_query_device.py \
+    %{buildroot}%{_datadir}/%{prj_name}/samples/python/hello_reshape_ssd/hello_reshape_ssd.py \
+    %{buildroot}%{_datadir}/%{prj_name}/samples/python/model_creation_sample/model_creation_sample.py
+
+# Unnecessary if we get our package dependencies and lib paths right!
+rm -fr %{buildroot}%{_prefix}/install_dependencies \
+    %{buildroot}%{_prefix}/setupvars.sh
+
+%{python_expand rm %{buildroot}%{$python_sitearch}/requirements.txt
+chmod -x %{buildroot}%{$python_sitearch}/%{name}/tools/ovc/ovc.py
+cp -r %{name}-%{version}.dist-info %{buildroot}%{$python_sitearch}/
+%fdupes %{buildroot}%{$python_sitearch}/%{name}/
+}
+
+%fdupes %{buildroot}%{_datadir}/
+
+# We do not use bundled thirdparty libs
+rm -fr %{buildroot}%{_datadir}/licenses/*
+
+%ldconfig_scriptlets -n %{shlib}
+%ldconfig_scriptlets -n %{shlib_c}
+%ldconfig_scriptlets -n lib%{name}_ir_frontend%{so_ver}
+%ldconfig_scriptlets -n lib%{name}_onnx_frontend%{so_ver}
+%ldconfig_scriptlets -n lib%{name}_paddle_frontend%{so_ver}
+%ldconfig_scriptlets -n lib%{name}_pytorch_frontend%{so_ver}
+%ldconfig_scriptlets -n lib%{name}_tensorflow_lite_frontend%{so_ver}
+%ldconfig_scriptlets -n lib%{name}_tensorflow_frontend%{so_ver}
+
+%files -n %{shlib}
+%license LICENSE
+%{_libdir}/libopenvino.so.*
+
+%files -n %{shlib_c}
+%license LICENSE
+%{_libdir}/libopenvino_c.so.*
+
+%files -n %{name}-auto-batch-plugin
+%dir %{_libdir}/%{prj_name}
+%{_libdir}/%{prj_name}/libopenvino_auto_batch_plugin.so
+
+%files -n %{name}-auto-plugin
+%dir %{_libdir}/%{prj_name}
+%{_libdir}/%{prj_name}/libopenvino_auto_plugin.so
+
+%ifarch %{x86_64}
+%files -n %{name}-intel-cpu-plugin
+%dir %{_libdir}/%{prj_name}
+%{_libdir}/%{prj_name}/libopenvino_intel_cpu_plugin.so
+
+%files -n %{name}-intel-npu-plugin
+%dir %{_libdir}/%{prj_name}
+%{_libdir}/%{prj_name}/libopenvino_intel_npu_plugin.so
+%endif
+
+%ifarch %{arm64}
+%files -n %{name}-arm-cpu-plugin
+%dir %{_libdir}/%{prj_name}
+%{_libdir}/%{prj_name}/libopenvino_arm_cpu_plugin.so
+%endif
+
+%ifarch riscv64
+%files -n %{name}-riscv-cpu-plugin
+%dir %{_libdir}/%{prj_name}
+%{_libdir}/%{prj_name}/libopenvino_riscv_cpu_plugin.so
+%endif
+
+%files -n %{name}-hetero-plugin
+%dir %{_libdir}/%{prj_name}
+%{_libdir}/%{prj_name}/libopenvino_hetero_plugin.so
+
+%files -n lib%{name}_onnx_frontend%{so_ver}
+%{_libdir}/libopenvino_onnx_frontend.so.*
+
+%files -n lib%{name}_ir_frontend%{so_ver}
+%{_libdir}/libopenvino_ir_frontend.so.*
+
+%files -n lib%{name}_paddle_frontend%{so_ver}
+%{_libdir}/libopenvino_paddle_frontend.so.*
+
+%files -n lib%{name}_pytorch_frontend%{so_ver}
+%{_libdir}/libopenvino_pytorch_frontend.so.*
+
+%files -n lib%{name}_tensorflow_frontend%{so_ver}
+%{_libdir}/libopenvino_tensorflow_frontend.so.*
+
+%files -n lib%{name}_tensorflow_lite_frontend%{so_ver}
+%{_libdir}/libopenvino_tensorflow_lite_frontend.so.*
+
+%files -n %{name}-sample
+%license LICENSE
+%{_datadir}/%{prj_name}/
+
+%files -n %{name}-devel
+%license LICENSE
+%{_includedir}/%{name}/
+%{_libdir}/cmake/%{prj_name}/
+%{_libdir}/*.so
+%{_libdir}/pkgconfig/openvino.pc
+
+%files %{python_files openvino}
+%license LICENSE
+%{python_sitearch}/openvino/
+%{python_sitearch}/openvino*-info/
+
+%changelog