From 98cd3ecc18a16f5c8c1caf64b7c659ce546f58b53990edbe020f6de589a26a0e Mon Sep 17 00:00:00 2001
From: Guillaume GARDET
Date: Tue, 1 Oct 2019 14:35:20 +0000
Subject: [PATCH] Accepting request 734348 from home:Guillaume_G:branches:science:machinelearning

- Re-enable TensorFlow Parser for TW
- Add openCL flavor
- Fix armv7

OBS-URL: https://build.opensuse.org/request/show/734348
OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=7
---
 _constraints              |   4 +-
 _multibuild               |   3 +
 armnn-fix_arm32.patch     |  79 +++++++++++
 armnn-fix_arm32_dep.patch | 284 ++++++++++++++++++++++++++++++++++++++
 armnn-rpmlintrc           |   4 +
 armnn.changes             |  32 ++++-
 armnn.spec                | 169 ++++++++++++++++-------
 7 files changed, 525 insertions(+), 50 deletions(-)
 create mode 100644 _multibuild
 create mode 100644 armnn-fix_arm32.patch
 create mode 100644 armnn-fix_arm32_dep.patch
 create mode 100644 armnn-rpmlintrc

diff --git a/_constraints b/_constraints
index c083f89..3ce911a 100644
--- a/_constraints
+++ b/_constraints
@@ -1,10 +1,10 @@
 <constraints>
   <hardware>
     <disk>
-      <size unit="G">5</size>
+      <size unit="G">6</size>
     </disk>
     <memory>
-      <size unit="G">2</size>
+      <size unit="G">4</size>
     </memory>
   </hardware>
 </constraints>
diff --git a/_multibuild b/_multibuild
new file mode 100644
index 0000000..2b219ad
--- /dev/null
+++ b/_multibuild
@@ -0,0 +1,3 @@
+<multibuild>
+  <package>opencl</package>
+</multibuild>
diff --git a/armnn-fix_arm32.patch b/armnn-fix_arm32.patch
new file mode 100644
index 0000000..30b011f
--- /dev/null
+++ b/armnn-fix_arm32.patch
@@ -0,0 +1,79 @@
+From d9f7c8ba3949823a623b407f4bd80d120ca0b5be Mon Sep 17 00:00:00 2001
+From: Aron Virginas-Tar
+Date: Fri, 13 Sep 2019 13:37:03 +0100
+Subject: [PATCH] IVGCVSW-3858 Fix RefTensorHandleTests on Raspberry Pi
+
+* Fix alignment check to use sizeof(size_t) instead of a hard-coded value
+
+Signed-off-by: Aron Virginas-Tar
+Change-Id: I092c4464c6cecb2403da9b7744b68ad063ddbad1
+---
+ src/backends/backendsCommon/test/EndToEndTestImpl.hpp |  6 ++----
+ src/backends/reference/RefTensorHandle.cpp            |  5 +++--
+ src/backends/reference/test/RefTensorHandleTests.cpp  | 10 ++++++----
+ 3 files changed, 11 insertions(+), 10 deletions(-)
+
+diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+index 8a3e44fc..040782bf 100644
+--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
++++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+@@ -210,14 +210,12 @@ inline void ImportNonAlignedPointerTest(std::vector<BackendId> backends)
+     };
+ 
+     // Misaligned input
+-    float * misalignedInputData = inputData.data();
+-    misalignedInputData++;
++    float* misalignedInputData = reinterpret_cast<float*>(reinterpret_cast<char*>(inputData.data()) + 1);
+ 
+     std::vector<float> outputData(5);
+ 
+     // Misaligned output
+-    float * misalignedOutputData = outputData.data();
+-    misalignedOutputData++;
++    float* misalignedOutputData = reinterpret_cast<float*>(reinterpret_cast<char*>(outputData.data()) + 1);
+ 
+     InputTensors inputTensors
+     {
+diff --git a/src/backends/reference/RefTensorHandle.cpp b/src/backends/reference/RefTensorHandle.cpp
+index 42ac7f08..84a74edc 100644
+--- a/src/backends/reference/RefTensorHandle.cpp
++++ b/src/backends/reference/RefTensorHandle.cpp
+@@ -110,8 +110,9 @@ bool RefTensorHandle::Import(void* memory, MemorySource source)
+ {
+     if (source == MemorySource::Malloc)
+     {
+-        // Checks the 16 byte memory alignment.
+-        if (reinterpret_cast<uintptr_t>(memory) % 16)
++        // Check memory alignment
++        constexpr uintptr_t alignment = sizeof(size_t);
++        if (reinterpret_cast<uintptr_t>(memory) % alignment)
+         {
+             if (m_Imported)
+             {
+diff --git a/src/backends/reference/test/RefTensorHandleTests.cpp b/src/backends/reference/test/RefTensorHandleTests.cpp
+index 2c5d6d49..be229bf8 100644
+--- a/src/backends/reference/test/RefTensorHandleTests.cpp
++++ b/src/backends/reference/test/RefTensorHandleTests.cpp
+@@ -92,15 +92,17 @@ BOOST_AUTO_TEST_CASE(MisalignedPointer)
+     TensorInfo info({2}, DataType::Float32);
+     RefTensorHandle handle(info, memoryManager, static_cast<unsigned int>(MemorySource::Malloc));
+ 
+-    // Allocates a 2 int array
++    // Allocate a 2 int array
+     int* testPtr = new int[2];
+-    int* misalignedPtr = testPtr + 1;
+ 
+-    BOOST_CHECK(!handle.Import(static_cast<void*>(misalignedPtr), MemorySource::Malloc));
++    // Increment pointer by 1 byte
++    void* misalignedPtr = static_cast<void*>(reinterpret_cast<char*>(testPtr) + 1);
++
++    BOOST_CHECK(!handle.Import(misalignedPtr, MemorySource::Malloc));
+ 
+     delete[] testPtr;
+ }
+ 
+ #endif
+ 
+-BOOST_AUTO_TEST_SUITE_END()
+\ No newline at end of file
++BOOST_AUTO_TEST_SUITE_END()
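The patch above replaces ArmNN's hard-coded 16-byte import check with one based on the native word size. The following standalone sketch (my illustration, not part of the patch or the package) shows why this matters on armv7, where sizeof(size_t) is 4 and a pointer offset by one float is still perfectly usable:

// alignment_demo.cpp - standalone illustration, not part of the patch.
// Build: g++ -std=c++14 alignment_demo.cpp -o alignment_demo
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Mirrors the patched RefTensorHandle::Import() rule: importable memory only
// needs to be aligned to the native word size, not to a fixed 16 bytes.
static bool IsImportable(const void* memory)
{
    constexpr uintptr_t alignment = sizeof(size_t); // 4 on armv7, 8 on aarch64
    return reinterpret_cast<uintptr_t>(memory) % alignment == 0;
}

int main()
{
    std::vector<float> data(4);

    // vector storage comes from operator new, so it is at least word-aligned
    std::printf("base pointer importable:         %d\n", IsImportable(data.data()));

    // +1 float advances 4 bytes: still word-aligned on armv7, yet the old
    // hard-coded '% 16' check rejected pointers like this on 32-bit ARM
    std::printf("float-offset pointer importable: %d\n", IsImportable(data.data() + 1));

    // a 1-byte offset is misaligned for every word size, which is why the
    // updated MisalignedPointer test builds its negative case this way
    const char* bytes = reinterpret_cast<const char*>(data.data());
    std::printf("byte-offset pointer importable:  %d\n", IsImportable(bytes + 1));
    return 0;
}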
diff --git a/armnn-fix_arm32_dep.patch b/armnn-fix_arm32_dep.patch
new file mode 100644
index 0000000..437467b
--- /dev/null
+++ b/armnn-fix_arm32_dep.patch
@@ -0,0 +1,284 @@
+From dcaa6109c95034aa3b945acd50a2882e40f13370 Mon Sep 17 00:00:00 2001
+From: Ferran Balaguer
+Date: Wed, 21 Aug 2019 13:28:38 +0100
+Subject: [PATCH] IVGCVSW-3175 Add Regression Tests for Zero Copy
+
+Signed-off-by: Ferran Balaguer
+Change-Id: I6f16ea0dca359283a3b187e2f046f82a7dc2ff7c
+---
+ .../backendsCommon/test/EndToEndTestImpl.hpp  | 153 ++++++++++++++++++
+ .../reference/test/RefEndToEndTests.cpp       |  86 ++--------
+ 2 files changed, 167 insertions(+), 72 deletions(-)
+
+diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+index f8673d69..8a3e44fc 100644
+--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
++++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+@@ -8,6 +8,7 @@
+ 
+ #include
+ #include
++#include
+ 
+ #include
+ 
+@@ -171,4 +172,156 @@ void EndToEndLayerTestImpl(INetworkPtr network,
+     }
+ }
+ 
++inline void ImportNonAlignedPointerTest(std::vector<BackendId> backends)
++{
++    using namespace armnn;
++
++    // Create runtime in which test will run
++    IRuntime::CreationOptions options;
++    IRuntimePtr runtime(armnn::IRuntime::Create(options));
++
++    // build up the structure of the network
++    INetworkPtr net(INetwork::Create());
++
++    IConnectableLayer* input = net->AddInputLayer(0);
++
++    NormalizationDescriptor descriptor;
++    IConnectableLayer* norm = net->AddNormalizationLayer(descriptor);
++
++    IConnectableLayer* output = net->AddOutputLayer(0);
++
++    input->GetOutputSlot(0).Connect(norm->GetInputSlot(0));
++    norm->GetOutputSlot(0).Connect(output->GetInputSlot(0));
++
++    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
++    norm->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
++
++    // Optimize the network
++    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
++
++    // Loads it into the runtime.
++    NetworkId netId;
++    runtime->LoadNetwork(netId, std::move(optNet));
++
++    // Creates structures for input & output
++    std::vector<float> inputData
++    {
++        1.0f, 2.0f, 3.0f, 4.0f, 5.0f
++    };
++
++    // Misaligned input
++    float * misalignedInputData = inputData.data();
++    misalignedInputData++;
++
++    std::vector<float> outputData(5);
++
++    // Misaligned output
++    float * misalignedOutputData = outputData.data();
++    misalignedOutputData++;
++
++    InputTensors inputTensors
++    {
++        {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), misalignedInputData)},
++    };
++    OutputTensors outputTensors
++    {
++        {0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), misalignedOutputData)}
++    };
++
++    // The result of the inference is not important, just the fact that there
++    // should not be CopyMemGeneric workloads.
++    runtime->GetProfiler(netId)->EnableProfiling(true);
++
++    // Do the inference
++    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
++
++    // Retrieve the Profiler.Print() output to get the workload execution
++    ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
++    std::stringstream ss;
++    profilerManager.GetProfiler()->Print(ss);;
++    std::string dump = ss.str();
++
++    // Contains RefNormalizationWorkload
++    std::size_t found = dump.find("RefNormalizationWorkload");
++    BOOST_TEST(found != std::string::npos);
++    // No Contains SyncMemGeneric (Created when importing the output tensor handle)
++    found = dump.find("SyncMemGeneric");
++    BOOST_TEST(found == std::string::npos);
++    // Contains CopyMemGeneric
++    found = dump.find("CopyMemGeneric");
++    BOOST_TEST(found != std::string::npos);
++}
++
++inline void ImportAlignedPointerTest(std::vector<BackendId> backends)
++{
++    using namespace armnn;
++
++    // Create runtime in which test will run
++    IRuntime::CreationOptions options;
++    IRuntimePtr runtime(armnn::IRuntime::Create(options));
++
++    // build up the structure of the network
++    INetworkPtr net(INetwork::Create());
++
++    IConnectableLayer* input = net->AddInputLayer(0);
++
++    NormalizationDescriptor descriptor;
++    IConnectableLayer* norm = net->AddNormalizationLayer(descriptor);
++
++    IConnectableLayer* output = net->AddOutputLayer(0);
++
++    input->GetOutputSlot(0).Connect(norm->GetInputSlot(0));
++    norm->GetOutputSlot(0).Connect(output->GetInputSlot(0));
++
++    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
++    norm->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
++
++    // Optimize the network
++    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
++
++    // Loads it into the runtime.
++    NetworkId netId;
++    runtime->LoadNetwork(netId, std::move(optNet));
++
++    // Creates structures for input & output
++    std::vector<float> inputData
++    {
++        1.0f, 2.0f, 3.0f, 4.0f
++    };
++
++    std::vector<float> outputData(4);
++
++    InputTensors inputTensors
++    {
++        {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())},
++    };
++    OutputTensors outputTensors
++    {
++        {0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
++    };
++
++    // The result of the inference is not important, just the fact that there
++    // should not be CopyMemGeneric workloads.
++    runtime->GetProfiler(netId)->EnableProfiling(true);
++
++    // Do the inference
++    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
++
++    // Retrieve the Profiler.Print() output to get the workload execution
++    ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
++    std::stringstream ss;
++    profilerManager.GetProfiler()->Print(ss);;
++    std::string dump = ss.str();
++
++    // Contains RefNormalizationWorkload
++    std::size_t found = dump.find("RefNormalizationWorkload");
++    BOOST_TEST(found != std::string::npos);
++    // Contains SyncMemGeneric
++    found = dump.find("SyncMemGeneric");
++    BOOST_TEST(found != std::string::npos);
++    // No contains CopyMemGeneric
++    found = dump.find("CopyMemGeneric");
++    BOOST_TEST(found == std::string::npos);
++}
++
+ } // anonymous namespace
+diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
+index 31e9b339..ee42c9e9 100644
+--- a/src/backends/reference/test/RefEndToEndTests.cpp
++++ b/src/backends/reference/test/RefEndToEndTests.cpp
+@@ -322,78 +322,6 @@ BOOST_AUTO_TEST_CASE(TrivialMin)
+     BOOST_TEST(outputData[3] == 2);
+ }
+ 
+-BOOST_AUTO_TEST_CASE(RefNoCopyWorkloads)
+-{
+-    using namespace armnn;
+-
+-    // Create runtime in which test will run
+-    IRuntime::CreationOptions options;
+-    IRuntimePtr runtime(armnn::IRuntime::Create(options));
+-
+-    // build up the structure of the network
+-    INetworkPtr net(INetwork::Create());
+-
+-    IConnectableLayer* input = net->AddInputLayer(0);
+-
+-    NormalizationDescriptor descriptor;
+-    IConnectableLayer* norm = net->AddNormalizationLayer(descriptor);
+-
+-    IConnectableLayer* output = net->AddOutputLayer(0);
+-
+-    input->GetOutputSlot(0).Connect(norm->GetInputSlot(0));
+-    norm->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+-
+-    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
+-    norm->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
+-
+-    // Optimize the network
+-    IOptimizedNetworkPtr optNet = Optimize(*net, defaultBackends, runtime->GetDeviceSpec());
+-
+-    // Loads it into the runtime.
+-    NetworkId netId;
+-    runtime->LoadNetwork(netId, std::move(optNet));
+-
+-    // Creates structures for input & output
+-    std::vector<float> inputData
+-    {
+-        1.0f, 2.0f, 3.0f, 4.0f
+-    };
+-
+-    std::vector<float> outputData(4);
+-
+-    InputTensors inputTensors
+-    {
+-        {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())},
+-    };
+-    OutputTensors outputTensors
+-    {
+-        {0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
+-    };
+-
+-    // The result of the inference is not important, just the fact that there
+-    // should not be CopyMemGeneric workloads.
+-    runtime->GetProfiler(netId)->EnableProfiling(true);
+-
+-    // Do the inference
+-    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
+-
+-    // Retrieve the Profiler.Print() output to get the workload execution
+-    ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
+-    std::stringstream ss;
+-    profilerManager.GetProfiler()->Print(ss);;
+-    std::string dump = ss.str();
+-
+-    // Contains RefNormalizationWorkload
+-    std::size_t found = dump.find("RefNormalizationWorkload");
+-    BOOST_TEST(found != std::string::npos);
+-    // Contains SyncMemGeneric
+-    found = dump.find("SyncMemGeneric");
+-    BOOST_TEST(found != std::string::npos);
+-    // No contains CopyMemGeneric
+-    found = dump.find("CopyMemGeneric");
+-    BOOST_TEST(found == std::string::npos);
+-}
+-
+ BOOST_AUTO_TEST_CASE(RefEqualSimpleEndToEndTest)
+ {
+     const std::vector<uint8_t> expectedOutput({ 1, 1, 1, 1,  0, 0, 0, 0,
+@@ -1023,4 +951,18 @@ BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndInt16NhwcTest)
+     ResizeNearestNeighborEndToEnd(defaultBackends, armnn::DataLayout::NHWC);
+ }
+ 
++#if !defined(__ANDROID__)
++// Only run these tests on non Android platforms
++BOOST_AUTO_TEST_CASE(RefImportNonAlignedPointerTest)
++{
++    ImportNonAlignedPointerTest(defaultBackends);
++}
++
++BOOST_AUTO_TEST_CASE(RefImportAlignedPointerTest)
++{
++    ImportAlignedPointerTest(defaultBackends);
++}
++
++#endif
++
+ BOOST_AUTO_TEST_SUITE_END()
+\ No newline at end of file
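The regression tests above forge their negative case by offsetting a buffer by a single byte, which defeats any word-size alignment rule. A minimal standalone sketch of that trick (standard C++ only; my illustration, not part of the patch):

// misaligned_demo.cpp - minimal sketch, not part of the patch.
// Build: g++ -std=c++14 misaligned_demo.cpp -o misaligned_demo
#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
    std::vector<float> inputData{ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f };

    // Offsetting by one byte (not one element) guarantees misalignment under
    // any word-sized rule, so RefTensorHandle::Import() must refuse the
    // pointer and the runtime falls back to a CopyMemGeneric workload.
    float* misaligned =
        reinterpret_cast<float*>(reinterpret_cast<char*>(inputData.data()) + 1);

    const size_t word = sizeof(size_t);
    std::printf("base:       addr %% %zu = %zu\n", word,
                static_cast<size_t>(reinterpret_cast<uintptr_t>(inputData.data()) % word));
    std::printf("misaligned: addr %% %zu = %zu\n", word,
                static_cast<size_t>(reinterpret_cast<uintptr_t>(misaligned) % word));

    // Note: the tests never dereference the misaligned float*; they only hand
    // it to the tensor bindings and then grep the profiler dump to see whether
    // the runtime imported the buffer or copied it.
    return 0;
}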
diff --git a/armnn-rpmlintrc b/armnn-rpmlintrc
new file mode 100644
index 0000000..2eac815
--- /dev/null
+++ b/armnn-rpmlintrc
@@ -0,0 +1,4 @@
+# This line is mandatory to access the configuration functions
+from Config import *
+
+addFilter(".*opencl.* shlib-policy-name-error")
diff --git a/armnn.changes b/armnn.changes
index 920408b..768d9a9 100644
--- a/armnn.changes
+++ b/armnn.changes
@@ -1,9 +1,39 @@
+-------------------------------------------------------------------
+Tue Oct 1 14:30:13 UTC 2019 - Guillaume GARDET
+
+- Re-enable TensorFlow on Tumbleweed as boo#1151150 is now fixed
+
+-------------------------------------------------------------------
+Thu Sep 19 07:21:48 UTC 2019 - Guillaume GARDET
+
+- Update _constraints
+
+-------------------------------------------------------------------
+Wed Sep 18 12:26:55 UTC 2019 - Guillaume GARDET
+
+- Enable openCL only in a separate flavor, as the runtime requires
+  libOpenCL.so, libGLES_mali.so, or libmali.so
+
+-------------------------------------------------------------------
+Wed Sep 18 11:53:30 UTC 2019 - Guillaume GARDET
+
+- Enable openCL support
+- Disable UnitTests when openCL support is enabled, as they fail
+  on OBS workers
+
+-------------------------------------------------------------------
+Wed Sep 18 09:57:39 UTC 2019 - Guillaume GARDET
+
+- Add patches to make UnitTests pass on armv7:
+  * armnn-fix_arm32.patch
+  * armnn-fix_arm32_dep.patch
+
 -------------------------------------------------------------------
 Thu Sep 5 11:22:41 UTC 2019 - Guillaume GARDET
 
 - Disable TensorFlow as on 15.1 only x86_64 succeed and on TW
   we have incompatibility with protobuf (3.8.0 in TW and
-  Tensorflow uses 3.6.1 internally)
+  Tensorflow uses 3.6.1 internally) - boo#1151150
 
 -------------------------------------------------------------------
 Thu Sep 5 07:25:16 UTC 2019 - Guillaume GARDET
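The "UnitTests fail on OBS workers" entry comes down to the build hosts having no OpenCL implementation or device. A hypothetical probe (my illustration, not shipped with this package; link with -lOpenCL) shows what such a worker would report:

// cl_probe.cpp - hypothetical probe, not part of this package.
// Build: g++ cl_probe.cpp -lOpenCL -o cl_probe
#include <CL/cl.h>
#include <cstdio>

int main()
{
    cl_uint numPlatforms = 0;
    cl_int status = clGetPlatformIDs(0, nullptr, &numPlatforms);
    if (status != CL_SUCCESS || numPlatforms == 0)
    {
        // This is the situation on an OBS worker: the loader may be installed
        // at build time, but no ICD/device is present, so GpuAcc tests cannot run.
        std::printf("no OpenCL platform found (status %d)\n", status);
        return 1;
    }

    cl_platform_id platform = nullptr;
    clGetPlatformIDs(1, &platform, nullptr);

    cl_uint numDevices = 0;
    status = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 0, nullptr, &numDevices);
    if (status != CL_SUCCESS || numDevices == 0)
    {
        std::printf("OpenCL platform present, but no GPU device\n");
        return 1;
    }

    std::printf("%u GPU device(s) available\n", numDevices);
    return 0;
}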
diff --git a/armnn.spec b/armnn.spec
index 55eb4f6..e8dfa01 100644
--- a/armnn.spec
+++ b/armnn.spec
@@ -15,9 +15,15 @@
 # Please submit bugfixes or comments via http://bugs.opensuse.org/
 #
 
+%define target @BUILD_FLAVOR@%{nil}
+
 # Disable LTO until lto link is fixed - https://github.com/ARM-software/armnn/issues/251
 %define _lto_cflags %{nil}
 
+%if "%{target}" != ""
+%define package_suffix -%{target}
+%endif
+
 # Compute library has neon enabled for aarch64 only
 %ifarch aarch64
 %bcond_without compute_neon
@@ -25,8 +31,11 @@
 %bcond_with compute_neon
 %endif
 
-# Disable OpenCL from Compute library, as check fails
+%if "%{target}" == "opencl"
+%bcond_without compute_cl
+%else
 %bcond_with compute_cl
+%endif
 
 # stb-devel is available on Leap 15.1+
 %if 0%{?suse_version} > 1500 || 0%{?sle_version} > 150000 && 0%{?is_opensuse}
@@ -45,27 +54,40 @@
 # Enable CAFFE
 %bcond_without armnn_caffe
 
-# Disable TensorFlow as on 15.1+ only x86_64 succeed and on TW we have incompatibility with protobuf (3.8.0 in TW for Caffe and Tensorflow uses 3.6.1)
+# Enable TensorFlow only on TW aarch64 and x86_64 (TF fails to build on Leap 15.x)
+%if 0%{?suse_version} > 1500
+%ifarch aarch64 x86_64
+%bcond_without armnn_tf
+%else
 %bcond_with armnn_tf
+%endif # ifarch
+%else # suse_version
+%bcond_with armnn_tf
+%endif # suse_version
 
 %define version_major 19
 %define version_minor 08
 
-Name: armnn
+Name: armnn%{?package_suffix}
 Version: %{version_major}.%{version_minor}
 Release: 0
 Summary: Arm NN SDK enables machine learning workloads on power-efficient devices
 License: MIT
 Group: Development/Libraries/Other
 Url: https://developer.arm.com/products/processors/machine-learning/arm-nn
-Source0: https://github.com/ARM-software/armnn/archive/v%{version}.tar.gz#/%{name}-%{version}.tar.gz
-Patch1: armnn-fix_boost.patch
+Source0: https://github.com/ARM-software/armnn/archive/v%{version}.tar.gz#/armnn-%{version}.tar.gz
+Source1: armnn-rpmlintrc
 # PATCH: based on http://arago-project.org/git/?p=meta-arago.git;a=blob;f=meta-arago-extras/recipes-support/armnn/armnn/0004-generate-versioned-library.patch;hb=master
-Patch2: armnn-generate-versioned-library.patch
+Patch1: armnn-generate-versioned-library.patch
 # Patch: http://arago-project.org/git/?p=meta-arago.git;a=blob;f=meta-arago-extras/recipes-support/armnn/armnn/0007-enable-use-of-arm-compute-shared-library.patch;hb=master
-Patch3: 0007-enable-use-of-arm-compute-shared-library.patch
+Patch2: 0007-enable-use-of-arm-compute-shared-library.patch
+# PATCH-FIX-UPSTREAM - https://github.com/ARM-software/armnn/issues/274
+Patch3: armnn-fix_boost.patch
+# PATCH-FIX-UPSTREAM - https://github.com/ARM-software/armnn/issues/266
+Patch4: armnn-fix_arm32_dep.patch
+Patch5: armnn-fix_arm32.patch
 # https://github.com/ARM-software/armnn/issues/207
-# FIXME: remove this patch once *.pb.cc files are packaged properly in tensorflow-devel
+# FIXME: remove this patch once *.pb.cc files are packaged properly in tensorflow-devel - https://github.com/ARM-software/armnn/issues/269
 Patch100: armnn-fix_tensorflow_link.patch
 %if 0%{?suse_version} < 1330
 BuildRequires: boost-devel >= 1.59
@@ -102,6 +124,27 @@
 BuildRequires: stb-devel
 BuildRequires: tensorflow-devel
 %endif
 BuildRequires: valgrind-devel
+%if %{with compute_cl}
+Recommends: Mesa-libOpenCL
+%endif
+# Make armnn-opencl pull the lib*-opencl libraries, and armnn the non-openCL ones
+Requires: libarmnn%{version_major}%{?package_suffix} = %{version}
+%if %{with armnn_flatbuffers}
+Requires: libarmnnSerializer%{version_major}%{?package_suffix} = %{version}
+Requires: libarmnnTfLiteParser%{version_major}%{?package_suffix} = %{version}
+%endif
+%if %{with armnn_caffe}
+Requires: libarmnnCaffeParser%{version_major}%{?package_suffix} = %{version}
+%endif
+%if %{with armnn_tf}
+Requires: libarmnnTfParser%{version_major}%{?package_suffix} = %{version}
+%endif
+# Make sure we do not install both openCL and non-openCL (CPU only) versions.
+%if "%{target}" == "opencl"
+Conflicts: armnn
+%else
+Conflicts: armnn-opencl
+%endif
 BuildRoot: %{_tmppath}/%{name}-%{version}-build
 
 %description
 Arm NN is an inference engine for CPUs, GPUs and NPUs.
 It bridges the gap between existing NN frameworks and the underlying IP.
 It enables efficient translation of existing neural network frameworks,
 such as TensorFlow and Caffe, allowing them to run efficiently – without
 modification – across Arm Cortex CPUs and Arm Mali GPUs.
 
@@ -115,12 +158,16 @@
 Summary: Development headers and libraries for armnn
 Group: Development/Libraries/C and C++
 Requires: %{name} = %{version}
-Requires: lib%{name}%{version_major} = %{version}
+Requires: libarmnn%{version_major}%{?package_suffix} = %{version}
 %if %{with armnn_flatbuffers}
-Requires: libarmnnSerializer%{version_major} = %{version}
+Requires: libarmnnSerializer%{version_major}%{?package_suffix} = %{version}
+Requires: libarmnnTfLiteParser%{version_major}%{?package_suffix} = %{version}
 %endif
 %if %{with armnn_caffe}
-Requires: libarmnnCaffeParser%{version_major} = %{version}
+Requires: libarmnnCaffeParser%{version_major}%{?package_suffix} = %{version}
+%endif
+%if %{with armnn_tf}
+Requires: libarmnnTfParser%{version_major}%{?package_suffix} = %{version}
 %endif
 
 %description devel
 Arm NN is an inference engine for CPUs, GPUs and NPUs.
 It bridges the gap between existing NN frameworks and the underlying IP.
 It enables efficient translation of existing neural network frameworks,
 such as TensorFlow and Caffe, allowing them to run efficiently – without
 modification – across Arm Cortex CPUs and Arm Mali GPUs.
 
 This package contains the development libraries and headers for armnn.
 
@@ -132,11 +179,16 @@
-%package -n lib%{name}%{version_major}
-Summary: lib%{name} from armnn
+%package -n libarmnn%{version_major}%{?package_suffix}
+%if "%{target}" == "opencl"
+Conflicts: libarmnn%{version_major}
+%else
+Conflicts: libarmnn%{version_major}-opencl
+%endif
+Summary: libarmnn from armnn
 Group: Development/Libraries/C and C++
 
-%description -n lib%{name}%{version_major}
+%description -n libarmnn%{version_major}%{?package_suffix}
 Arm NN is an inference engine for CPUs, GPUs and NPUs.
 It bridges the gap between existing NN frameworks and the underlying IP.
 It enables efficient translation of existing neural network frameworks,
 such as TensorFlow and Caffe, allowing them to run efficiently – without
 modification – across Arm Cortex CPUs and Arm Mali GPUs.
 
 This package contains the libarmnn library from armnn.
 
@@ -146,11 +198,16 @@
 %if %{with armnn_flatbuffers}
-%package -n libarmnnSerializer%{version_major}
+%package -n libarmnnSerializer%{version_major}%{?package_suffix}
+%if "%{target}" == "opencl"
+Conflicts: libarmnnSerializer%{version_major}
+%else
+Conflicts: libarmnnSerializer%{version_major}-opencl
+%endif
 Summary: libarmnnSerializer from armnn
 Group: Development/Libraries/C and C++
 
-%description -n libarmnnSerializer%{version_major}
+%description -n libarmnnSerializer%{version_major}%{?package_suffix}
 Arm NN is an inference engine for CPUs, GPUs and NPUs.
 It bridges the gap between existing NN frameworks and the underlying IP.
 It enables efficient translation of existing neural network frameworks,
 such as TensorFlow and Caffe, allowing them to run efficiently – without
 modification – across Arm Cortex CPUs and Arm Mali GPUs.
 
 This package contains the libarmnnSerializer library from armnn.
 
@@ -159,11 +216,16 @@
-%package -n libarmnnTfLiteParser%{version_major}
+%package -n libarmnnTfLiteParser%{version_major}%{?package_suffix}
+%if "%{target}" == "opencl"
+Conflicts: libarmnnTfLiteParser%{version_major}
+%else
+Conflicts: libarmnnTfLiteParser%{version_major}-opencl
+%endif
 Summary: libarmnnTfLiteParser from armnn
 Group: Development/Libraries/C and C++
 
-%description -n libarmnnTfLiteParser%{version_major}
+%description -n libarmnnTfLiteParser%{version_major}%{?package_suffix}
 Arm NN is an inference engine for CPUs, GPUs and NPUs.
 It bridges the gap between existing NN frameworks and the underlying IP.
 It enables efficient translation of existing neural network frameworks,
 such as TensorFlow and Caffe, allowing them to run efficiently – without
 modification – across Arm Cortex CPUs and Arm Mali GPUs.
 
 This package contains the libarmnnTfLiteParser library from armnn.
 %endif
 
@@ -174,11 +236,16 @@
 %if %{with armnn_tf}
-%package -n libarmnnTfParser%{version_major}
+%package -n libarmnnTfParser%{version_major}%{?package_suffix}
+%if "%{target}" == "opencl"
+Conflicts: libarmnnTfParser%{version_major}
+%else
+Conflicts: libarmnnTfParser%{version_major}-opencl
+%endif
 Summary: libarmnnTfParser from armnn
 Group: Development/Libraries/C and C++
 
-%description -n libarmnnTfParser%{version_major}
+%description -n libarmnnTfParser%{version_major}%{?package_suffix}
 Arm NN is an inference engine for CPUs, GPUs and NPUs.
 It bridges the gap between existing NN frameworks and the underlying IP.
 It enables efficient translation of existing neural network frameworks,
 such as TensorFlow and Caffe, allowing them to run efficiently – without
 modification – across Arm Cortex CPUs and Arm Mali GPUs.
 
 This package contains the libarmnnTfParser library from armnn.
 %endif
 
@@ -189,11 +256,16 @@
 %if %{with armnn_caffe}
-%package -n libarmnnCaffeParser%{version_major}
+%package -n libarmnnCaffeParser%{version_major}%{?package_suffix}
+%if "%{target}" == "opencl"
+Conflicts: libarmnnCaffeParser%{version_major}
+%else
+Conflicts: libarmnnCaffeParser%{version_major}-opencl
+%endif
 Summary: libarmnnCaffeParser from armnn
 Group: Development/Libraries/C and C++
 
-%description -n libarmnnCaffeParser%{version_major}
+%description -n libarmnnCaffeParser%{version_major}%{?package_suffix}
 Arm NN is an inference engine for CPUs, GPUs and NPUs.
 It bridges the gap between existing NN frameworks and the underlying IP.
 It enables efficient translation of existing neural network frameworks,
 such as TensorFlow and Caffe, allowing them to run efficiently – without
 modification – across Arm Cortex CPUs and Arm Mali GPUs.
 
 This package contains the libarmnnCaffeParser library from armnn.
 %endif
 
@@ -204,10 +276,12 @@
 %prep
-%setup -q
+%setup -q -n armnn-%{version}
 %patch1 -p1
 %patch2 -p1
 %patch3 -p1
+%patch4 -p1
+%patch5 -p1
 %patch100 -p1
 # Boost fixes for dynamic linking
 sed -i 's/add_definitions("-DBOOST_ALL_NO_LIB")/add_definitions("-DBOOST_ALL_DYN_LINK")/' ./cmake/GlobalConfig.cmake
@@ -305,32 +379,33 @@ find ./build/tests -maxdepth 1 -type f -executable -exec cp $CP_ARGS {} %{buildroot}%{_bindir}
 cp $CP_ARGS ./build/samples/SimpleSample %{buildroot}%{_bindir}
 %endif
 
-%if %{with armnn_tests}
+# openCL UnitTests fail in OBS due to the lack of an openCL device
+%if %{without compute_cl} && %{with armnn_tests}
 %check
 # Run tests
 LD_LIBRARY_PATH="$(pwd)/build/" \
 ./build/UnitTests
 %endif
 
-%post -n lib%{name}%{version_major} -p /sbin/ldconfig
-%postun -n lib%{name}%{version_major} -p /sbin/ldconfig
+%post -n libarmnn%{version_major}%{?package_suffix} -p /sbin/ldconfig
+%postun -n libarmnn%{version_major}%{?package_suffix} -p /sbin/ldconfig
 
 %if %{with armnn_flatbuffers}
-%post -n libarmnnSerializer%{version_major} -p /sbin/ldconfig
-%postun -n libarmnnSerializer%{version_major} -p /sbin/ldconfig
+%post -n libarmnnSerializer%{version_major}%{?package_suffix} -p /sbin/ldconfig
+%postun -n libarmnnSerializer%{version_major}%{?package_suffix} -p /sbin/ldconfig
 
-%post -n libarmnnTfLiteParser%{version_major} -p /sbin/ldconfig
-%postun -n libarmnnTfLiteParser%{version_major} -p /sbin/ldconfig
+%post -n libarmnnTfLiteParser%{version_major}%{?package_suffix} -p /sbin/ldconfig
+%postun -n libarmnnTfLiteParser%{version_major}%{?package_suffix} -p /sbin/ldconfig
 %endif
 
 %if %{with armnn_tf}
-%post -n libarmnnTfParser%{version_major} -p /sbin/ldconfig
-%postun -n libarmnnTfParser%{version_major} -p /sbin/ldconfig
+%post -n libarmnnTfParser%{version_major}%{?package_suffix} -p /sbin/ldconfig
+%postun -n libarmnnTfParser%{version_major}%{?package_suffix} -p /sbin/ldconfig
 %endif
 
 %if %{with armnn_caffe}
-%post -n libarmnnCaffeParser%{version_major} -p /sbin/ldconfig
-%postun -n libarmnnCaffeParser%{version_major} -p /sbin/ldconfig
+%post -n libarmnnCaffeParser%{version_major}%{?package_suffix} -p /sbin/ldconfig
+%postun -n libarmnnCaffeParser%{version_major}%{?package_suffix} -p /sbin/ldconfig
 %endif
 
@@ -338,6 +413,12 @@ %files
 %defattr(-,root,root)
 %doc README.md
 %license LICENSE
+%if %{with armnn_tests}
+%{_bindir}/ExecuteNetwork
+%if %{with armnn_caffe}
+%{_bindir}/Caffe*-Armnn
+%{_bindir}/MultipleNetworksCifar10
+%endif
 %if %{with armnn_flatbuffers}
 %{_bindir}/TfLite*-Armnn
 %{_bindir}/Image*Generator
@@ -345,35 +426,29 @@
 %if %{with armnn_tf}
 %{_bindir}/Tf*-Armnn
 %endif
-%if %{with armnn_tests}
-%{_bindir}/ExecuteNetwork
-%if %{with armnn_caffe}
-%{_bindir}/Caffe*-Armnn
-%{_bindir}/MultipleNetworksCifar10
-%endif
-%endif
 %if %{with armnn_flatbuffers}
 %{_bindir}/SimpleSample
 %endif
+%endif
 
-%files -n lib%{name}%{version_major}
-%{_libdir}/lib%{name}.so.*
+%files -n libarmnn%{version_major}%{?package_suffix}
+%{_libdir}/libarmnn.so.*
 
 %if %{with armnn_flatbuffers}
-%files -n libarmnnSerializer%{version_major}
+%files -n libarmnnSerializer%{version_major}%{?package_suffix}
 %{_libdir}/libarmnnSerializer.so.*
 
-%files -n libarmnnTfLiteParser%{version_major}
+%files -n libarmnnTfLiteParser%{version_major}%{?package_suffix}
 %{_libdir}/libarmnnTfLiteParser.so.*
 %endif
 
 %if %{with armnn_tf}
-%files -n libarmnnTfParser%{version_major}
+%files -n libarmnnTfParser%{version_major}%{?package_suffix}
 %{_libdir}/libarmnnTfParser.so.*
 %endif
 
 %if %{with armnn_caffe}
-%files -n libarmnnCaffeParser%{version_major}
+%files -n libarmnnCaffeParser%{version_major}%{?package_suffix}
 %{_libdir}/libarmnnCaffeParser.so.*
 %endif
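For reference, the zero-copy verification pattern used throughout the patches can be assembled into a standalone program. The sketch below only mirrors calls that appear in the test code above; the backend choice (Compute::CpuRef), the umbrella include, and the use of the public IProfiler::Print() instead of the internal ProfilerManager are my assumptions, and this file is not shipped by the package:

// zerocopy_check.cpp - sketch assembled from the test code in the patches
// above; not part of this package. Build against ArmNN 19.08 headers/libs.
#include <armnn/ArmNN.hpp>
#include <iostream>
#include <sstream>
#include <vector>

int main()
{
    using namespace armnn;

    IRuntime::CreationOptions options;
    IRuntimePtr runtime(IRuntime::Create(options));

    // Build the tiny input -> normalization -> output network from the tests.
    INetworkPtr net(INetwork::Create());
    IConnectableLayer* input = net->AddInputLayer(0);
    NormalizationDescriptor descriptor;
    IConnectableLayer* norm = net->AddNormalizationLayer(descriptor);
    IConnectableLayer* output = net->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(norm->GetInputSlot(0));
    norm->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
    norm->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));

    std::vector<BackendId> backends = { Compute::CpuRef };
    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());

    NetworkId netId;
    runtime->LoadNetwork(netId, std::move(optNet));

    // Aligned buffers, so the reference backend should import rather than copy.
    std::vector<float> inputData{ 1.0f, 2.0f, 3.0f, 4.0f };
    std::vector<float> outputData(4);

    InputTensors inputTensors
    {
        {0, ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())}
    };
    OutputTensors outputTensors
    {
        {0, Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
    };

    runtime->GetProfiler(netId)->EnableProfiling(true);
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);

    // Grep the profiler dump for copy workloads, as the regression tests do.
    std::stringstream ss;
    runtime->GetProfiler(netId)->Print(ss);
    const std::string dump = ss.str();

    std::cout << "zero copy: "
              << (dump.find("CopyMemGeneric") == std::string::npos ? "yes" : "no")
              << std::endl;
    return 0;
}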