Accepting request 734348 from home:Guillaume_G:branches:science:machinelearning

- Re-enable TensorFlow Parser for TW
- Add openCL flavor
- Fix armv7

OBS-URL: https://build.opensuse.org/request/show/734348
OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=7
Commit 98cd3ecc18 (parent 6670a974c5) by Guillaume GARDET, 2019-10-01 14:35:20 +00:00, committed by Git OBS Bridge
7 changed files with 525 additions and 50 deletions

_constraints
@@ -1,10 +1,10 @@
 <constraints>
   <hardware>
     <memory>
-      <size unit="G">5</size>
+      <size unit="G">6</size>
     </memory>
     <disk>
-      <size unit="G">2</size>
+      <size unit="G">4</size>
     </disk>
   </hardware>
 </constraints>

_multibuild (new file)
@@ -0,0 +1,3 @@
<multibuild>
  <flavor>opencl</flavor>
</multibuild>
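How the flavor reaches the build: OBS rebuilds the package once per <flavor> entry, substituting the flavor name for @BUILD_FLAVOR@. The spec file below consumes it with "%define target @BUILD_FLAVOR@%{nil}" and switches OpenCL support and package naming on the "%if "%{target}" == "opencl"" branches.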

armnn-fix_arm32.patch (new file)
@@ -0,0 +1,79 @@
From d9f7c8ba3949823a623b407f4bd80d120ca0b5be Mon Sep 17 00:00:00 2001
From: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Date: Fri, 13 Sep 2019 13:37:03 +0100
Subject: [PATCH] IVGCVSW-3858 Fix RefTensorHandleTests on Raspberry Pi
* Fix alignment check to use sizeof(size_t) instead of a hard-coded value
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: I092c4464c6cecb2403da9b7744b68ad063ddbad1
---
src/backends/backendsCommon/test/EndToEndTestImpl.hpp | 6 ++----
src/backends/reference/RefTensorHandle.cpp | 5 +++--
src/backends/reference/test/RefTensorHandleTests.cpp | 10 ++++++----
3 files changed, 11 insertions(+), 10 deletions(-)
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index 8a3e44fc..040782bf 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -210,14 +210,12 @@ inline void ImportNonAlignedPointerTest(std::vector<BackendId> backends)
};
// Misaligned input
- float * misalignedInputData = inputData.data();
- misalignedInputData++;
+ float* misalignedInputData = reinterpret_cast<float*>(reinterpret_cast<char*>(inputData.data()) + 1);
std::vector<float> outputData(5);
// Misaligned output
- float * misalignedOutputData = outputData.data();
- misalignedOutputData++;
+ float* misalignedOutputData = reinterpret_cast<float*>(reinterpret_cast<char*>(outputData.data()) + 1);
InputTensors inputTensors
{
diff --git a/src/backends/reference/RefTensorHandle.cpp b/src/backends/reference/RefTensorHandle.cpp
index 42ac7f08..84a74edc 100644
--- a/src/backends/reference/RefTensorHandle.cpp
+++ b/src/backends/reference/RefTensorHandle.cpp
@@ -110,8 +110,9 @@ bool RefTensorHandle::Import(void* memory, MemorySource source)
{
if (source == MemorySource::Malloc)
{
- // Checks the 16 byte memory alignment.
- if (reinterpret_cast<uint64_t>(memory) % 16)
+ // Check memory alignment
+ constexpr uintptr_t alignment = sizeof(size_t);
+ if (reinterpret_cast<uintptr_t>(memory) % alignment)
{
if (m_Imported)
{
diff --git a/src/backends/reference/test/RefTensorHandleTests.cpp b/src/backends/reference/test/RefTensorHandleTests.cpp
index 2c5d6d49..be229bf8 100644
--- a/src/backends/reference/test/RefTensorHandleTests.cpp
+++ b/src/backends/reference/test/RefTensorHandleTests.cpp
@@ -92,15 +92,17 @@ BOOST_AUTO_TEST_CASE(MisalignedPointer)
TensorInfo info({2}, DataType::Float32);
RefTensorHandle handle(info, memoryManager, static_cast<unsigned int>(MemorySource::Malloc));
- // Allocates a 2 int array
+ // Allocate a 2 int array
int* testPtr = new int[2];
- int* misalignedPtr = testPtr + 1;
- BOOST_CHECK(!handle.Import(static_cast<void *>(misalignedPtr), MemorySource::Malloc));
+ // Increment pointer by 1 byte
+ void* misalignedPtr = static_cast<void*>(reinterpret_cast<char*>(testPtr) + 1);
+
+ BOOST_CHECK(!handle.Import(misalignedPtr, MemorySource::Malloc));
delete[] testPtr;
}
#endif
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+BOOST_AUTO_TEST_SUITE_END()
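Both ideas in this patch are easy to see in a standalone C++ sketch (illustrative code, not from the patch; IsWordAligned is a hypothetical helper mirroring the patched RefTensorHandle::Import check): misalignment is forced by offsetting a pointer one byte via char* arithmetic, and alignment is tested against sizeof(size_t), which is 4 on armv7 and 8 on aarch64, instead of a hard-coded 16.

#include <cstddef>
#include <cstdint>
#include <iostream>

// Same check as the patched RefTensorHandle::Import: accept an address
// only if it is a multiple of the platform word size.
bool IsWordAligned(const void* memory)
{
    constexpr std::uintptr_t alignment = sizeof(std::size_t); // 4 on armv7, 8 on aarch64
    return reinterpret_cast<std::uintptr_t>(memory) % alignment == 0;
}

int main()
{
    int buffer[2] = {0, 0};
    // Offset by one byte (not one element) so the result is guaranteed
    // to be misaligned, exactly as the patched tests do.
    void* misaligned = static_cast<void*>(reinterpret_cast<char*>(buffer) + 1);
    std::cout << IsWordAligned(buffer) << '\n';     // 1: arrays are word aligned
    std::cout << IsWordAligned(misaligned) << '\n'; // 0: off-by-one-byte address
}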

armnn-fix_arm32_dep.patch (new file)
@@ -0,0 +1,284 @@
From dcaa6109c95034aa3b945acd50a2882e40f13370 Mon Sep 17 00:00:00 2001
From: Ferran Balaguer <ferran.balaguer@arm.com>
Date: Wed, 21 Aug 2019 13:28:38 +0100
Subject: [PATCH] IVGCVSW-3175 Add Regression Tests for Zero Copy
Signed-off-by: Ferran Balaguer <ferran.balaguer@arm.com>
Change-Id: I6f16ea0dca359283a3b187e2f046f82a7dc2ff7c
---
.../backendsCommon/test/EndToEndTestImpl.hpp | 153 ++++++++++++++++++
.../reference/test/RefEndToEndTests.cpp | 86 ++--------
2 files changed, 167 insertions(+), 72 deletions(-)
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index f8673d69..8a3e44fc 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -8,6 +8,7 @@
#include <armnn/ArmNN.hpp>
#include <armnn/INetwork.hpp>
+#include <Profiling.hpp>
#include <backendsCommon/test/QuantizeHelper.hpp>
@@ -171,4 +172,156 @@ void EndToEndLayerTestImpl(INetworkPtr network,
}
}
+inline void ImportNonAlignedPointerTest(std::vector<BackendId> backends)
+{
+ using namespace armnn;
+
+ // Create runtime in which test will run
+ IRuntime::CreationOptions options;
+ IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+ // build up the structure of the network
+ INetworkPtr net(INetwork::Create());
+
+ IConnectableLayer* input = net->AddInputLayer(0);
+
+ NormalizationDescriptor descriptor;
+ IConnectableLayer* norm = net->AddNormalizationLayer(descriptor);
+
+ IConnectableLayer* output = net->AddOutputLayer(0);
+
+ input->GetOutputSlot(0).Connect(norm->GetInputSlot(0));
+ norm->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+ input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
+ norm->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
+
+ // Optimize the network
+ IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+
+ // Loads it into the runtime.
+ NetworkId netId;
+ runtime->LoadNetwork(netId, std::move(optNet));
+
+ // Creates structures for input & output
+ std::vector<float> inputData
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f
+ };
+
+ // Misaligned input
+ float * misalignedInputData = inputData.data();
+ misalignedInputData++;
+
+ std::vector<float> outputData(5);
+
+ // Misaligned output
+ float * misalignedOutputData = outputData.data();
+ misalignedOutputData++;
+
+ InputTensors inputTensors
+ {
+ {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), misalignedInputData)},
+ };
+ OutputTensors outputTensors
+ {
+ {0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), misalignedOutputData)}
+ };
+
+ // The result of the inference is not important, just the fact that there
+ // should not be CopyMemGeneric workloads.
+ runtime->GetProfiler(netId)->EnableProfiling(true);
+
+ // Do the inference
+ runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
+
+ // Retrieve the Profiler.Print() output to get the workload execution
+ ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
+ std::stringstream ss;
+ profilerManager.GetProfiler()->Print(ss);;
+ std::string dump = ss.str();
+
+ // Contains RefNormalizationWorkload
+ std::size_t found = dump.find("RefNormalizationWorkload");
+ BOOST_TEST(found != std::string::npos);
+ // No Contains SyncMemGeneric (Created when importing the output tensor handle)
+ found = dump.find("SyncMemGeneric");
+ BOOST_TEST(found == std::string::npos);
+ // Contains CopyMemGeneric
+ found = dump.find("CopyMemGeneric");
+ BOOST_TEST(found != std::string::npos);
+}
+
+inline void ImportAlignedPointerTest(std::vector<BackendId> backends)
+{
+ using namespace armnn;
+
+ // Create runtime in which test will run
+ IRuntime::CreationOptions options;
+ IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+ // build up the structure of the network
+ INetworkPtr net(INetwork::Create());
+
+ IConnectableLayer* input = net->AddInputLayer(0);
+
+ NormalizationDescriptor descriptor;
+ IConnectableLayer* norm = net->AddNormalizationLayer(descriptor);
+
+ IConnectableLayer* output = net->AddOutputLayer(0);
+
+ input->GetOutputSlot(0).Connect(norm->GetInputSlot(0));
+ norm->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+ input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
+ norm->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
+
+ // Optimize the network
+ IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+
+ // Loads it into the runtime.
+ NetworkId netId;
+ runtime->LoadNetwork(netId, std::move(optNet));
+
+ // Creates structures for input & output
+ std::vector<float> inputData
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f
+ };
+
+ std::vector<float> outputData(4);
+
+ InputTensors inputTensors
+ {
+ {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())},
+ };
+ OutputTensors outputTensors
+ {
+ {0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
+ };
+
+ // The result of the inference is not important, just the fact that there
+ // should not be CopyMemGeneric workloads.
+ runtime->GetProfiler(netId)->EnableProfiling(true);
+
+ // Do the inference
+ runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
+
+ // Retrieve the Profiler.Print() output to get the workload execution
+ ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
+ std::stringstream ss;
+ profilerManager.GetProfiler()->Print(ss);;
+ std::string dump = ss.str();
+
+ // Contains RefNormalizationWorkload
+ std::size_t found = dump.find("RefNormalizationWorkload");
+ BOOST_TEST(found != std::string::npos);
+ // Contains SyncMemGeneric
+ found = dump.find("SyncMemGeneric");
+ BOOST_TEST(found != std::string::npos);
+ // No contains CopyMemGeneric
+ found = dump.find("CopyMemGeneric");
+ BOOST_TEST(found == std::string::npos);
+}
+
} // anonymous namespace
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 31e9b339..ee42c9e9 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -322,78 +322,6 @@ BOOST_AUTO_TEST_CASE(TrivialMin)
BOOST_TEST(outputData[3] == 2);
}
-BOOST_AUTO_TEST_CASE(RefNoCopyWorkloads)
-{
- using namespace armnn;
-
- // Create runtime in which test will run
- IRuntime::CreationOptions options;
- IRuntimePtr runtime(armnn::IRuntime::Create(options));
-
- // build up the structure of the network
- INetworkPtr net(INetwork::Create());
-
- IConnectableLayer* input = net->AddInputLayer(0);
-
- NormalizationDescriptor descriptor;
- IConnectableLayer* norm = net->AddNormalizationLayer(descriptor);
-
- IConnectableLayer* output = net->AddOutputLayer(0);
-
- input->GetOutputSlot(0).Connect(norm->GetInputSlot(0));
- norm->GetOutputSlot(0).Connect(output->GetInputSlot(0));
-
- input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
- norm->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
-
- // Optimize the network
- IOptimizedNetworkPtr optNet = Optimize(*net, defaultBackends, runtime->GetDeviceSpec());
-
- // Loads it into the runtime.
- NetworkId netId;
- runtime->LoadNetwork(netId, std::move(optNet));
-
- // Creates structures for input & output
- std::vector<float> inputData
- {
- 1.0f, 2.0f, 3.0f, 4.0f
- };
-
- std::vector<float> outputData(4);
-
- InputTensors inputTensors
- {
- {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())},
- };
- OutputTensors outputTensors
- {
- {0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
- };
-
- // The result of the inference is not important, just the fact that there
- // should not be CopyMemGeneric workloads.
- runtime->GetProfiler(netId)->EnableProfiling(true);
-
- // Do the inference
- runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
-
- // Retrieve the Profiler.Print() output to get the workload execution
- ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
- std::stringstream ss;
- profilerManager.GetProfiler()->Print(ss);;
- std::string dump = ss.str();
-
- // Contains RefNormalizationWorkload
- std::size_t found = dump.find("RefNormalizationWorkload");
- BOOST_TEST(found != std::string::npos);
- // Contains SyncMemGeneric
- found = dump.find("SyncMemGeneric");
- BOOST_TEST(found != std::string::npos);
- // No contains CopyMemGeneric
- found = dump.find("CopyMemGeneric");
- BOOST_TEST(found == std::string::npos);
-}
-
BOOST_AUTO_TEST_CASE(RefEqualSimpleEndToEndTest)
{
const std::vector<uint8_t> expectedOutput({ 1, 1, 1, 1, 0, 0, 0, 0,
@@ -1023,4 +951,18 @@ BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndInt16NhwcTest)
ResizeNearestNeighborEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NHWC);
}
+#if !defined(__ANDROID__)
+// Only run these tests on non Android platforms
+BOOST_AUTO_TEST_CASE(RefImportNonAlignedPointerTest)
+{
+ ImportNonAlignedPointerTest(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefImportAlignedPointerTest)
+{
+ ImportAlignedPointerTest(defaultBackends);
+}
+
+#endif
+
BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
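All the tests added above share one assertion pattern: print the profiler into a string and check which workload names occur in it. A minimal sketch of that check (illustrative; ContainsWorkload is a hypothetical helper, and in the real tests the dump comes from profilerManager.GetProfiler()->Print(ss)):

#include <iostream>
#include <string>

// The tests assert presence/absence of workload names in the profiler dump.
bool ContainsWorkload(const std::string& dump, const std::string& name)
{
    return dump.find(name) != std::string::npos;
}

int main()
{
    // Zero-copy run: the output handle was imported, so SyncMemGeneric
    // shows up and no CopyMemGeneric workload was created.
    const std::string dump = "RefNormalizationWorkload SyncMemGeneric";
    std::cout << ContainsWorkload(dump, "RefNormalizationWorkload") << '\n'; // 1
    std::cout << ContainsWorkload(dump, "SyncMemGeneric") << '\n';           // 1
    std::cout << ContainsWorkload(dump, "CopyMemGeneric") << '\n';           // 0
}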

armnn-rpmlintrc (new file)
@@ -0,0 +1,4 @@
# This line is mandatory to access the configuration functions
from Config import *
addFilter(".*opencl.* shlib-policy-name-error")
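Context for the filter: the openCL-flavor library packages built below carry a name suffix (for example libarmnn19-opencl) that does not match the plain sonames (libarmnn.so.19), so rpmlint's shlib-policy-name-error check would otherwise flag them; the filter silences exactly that check for the opencl packages.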

armnn.changes
@@ -1,9 +1,39 @@
+-------------------------------------------------------------------
+Tue Oct 1 14:30:13 UTC 2019 - Guillaume GARDET <guillaume.gardet@opensuse.org>
+
+- Re-enable Tensorflow on Tumbleweed as boo#1151150 is now fixed
+
+-------------------------------------------------------------------
+Thu Sep 19 07:21:48 UTC 2019 - Guillaume GARDET <guillaume.gardet@opensuse.org>
+
+- Update _constraints
+
+-------------------------------------------------------------------
+Wed Sep 18 12:26:55 UTC 2019 - Guillaume GARDET <guillaume.gardet@opensuse.org>
+
+- Enable openCL only on a separate flavor as runtime requires
+  libOpenCL.so, libGLES_mali.so, or libmali.so
+
+-------------------------------------------------------------------
+Wed Sep 18 11:53:30 UTC 2019 - Guillaume GARDET <guillaume.gardet@opensuse.org>
+
+- Enable openCL support
+- Disable UnitTests when openCL support is enabled as it fails
+  on OBS workers
+
+-------------------------------------------------------------------
+Wed Sep 18 09:57:39 UTC 2019 - Guillaume GARDET <guillaume.gardet@opensuse.org>
+
+- Add patches to make UnitTests to pass on armv7:
+  * armnn-fix_arm32.patch
+  * armnn-fix_arm32_dep.patch
+
 -------------------------------------------------------------------
 Thu Sep 5 11:22:41 UTC 2019 - Guillaume GARDET <guillaume.gardet@opensuse.org>
 
 - Disable TensorFlow as on 15.1 only x86_64 succeed and on TW we
   have incompatibility with protobuf (3.8.0 in TW and
-  Tensorflow uses 3.6.1 internally)
+  Tensorflow uses 3.6.1 internally) - boo#1151150
 -------------------------------------------------------------------
 Thu Sep 5 07:25:16 UTC 2019 - Guillaume GARDET <guillaume.gardet@opensuse.org>

armnn.spec
@@ -15,9 +15,15 @@
 # Please submit bugfixes or comments via http://bugs.opensuse.org/
 #
 
+%define target @BUILD_FLAVOR@%{nil}
+
 # Disable LTO until lto link is fixed - https://github.com/ARM-software/armnn/issues/251
 %define _lto_cflags %{nil}
 
+%if "%{target}" != ""
+%define package_suffix -%{target}
+%endif
+
 # Compute library has neon enabled for aarch64 only
 %ifarch aarch64
 %bcond_without compute_neon
@@ -25,8 +31,11 @@
 %bcond_with compute_neon
 %endif
 
-# Disable OpenCL from Compute library, as check fails
+%if "%{target}" == "opencl"
+%bcond_without compute_cl
+%else
 %bcond_with compute_cl
+%endif
 
 # stb-devel is available on Leap 15.1+
 %if 0%{?suse_version} > 1500 || 0%{?sle_version} > 150000 && 0%{?is_opensuse}
@@ -45,27 +54,40 @@
 # Enable CAFFE
 %bcond_without armnn_caffe
 
-# Disable TensorFlow as on 15.1+ only x86_64 succeed and on TW we have incompatibility with protobuf (3.8.0 in TW for Caffe and Tensorflow uses 3.6.1)
+# Enable TensorFlow only on TW aarch64 and x86_64 (TF fails to build on Leap 15.x)
+%if 0%{?suse_version} > 1500
+%ifarch aarch64 x86_64
+%bcond_without armnn_tf
+%else
 %bcond_with armnn_tf
+%endif # ifarch
+%else # suse_version
+%bcond_with armnn_tf
+%endif # suse_version
 
 %define version_major 19
 %define version_minor 08
 
-Name: armnn
+Name: armnn%{?package_suffix}
 Version: %{version_major}.%{version_minor}
 Release: 0
 Summary: Arm NN SDK enables machine learning workloads on power-efficient devices
 License: MIT
 Group: Development/Libraries/Other
 Url: https://developer.arm.com/products/processors/machine-learning/arm-nn
-Source0: https://github.com/ARM-software/armnn/archive/v%{version}.tar.gz#/%{name}-%{version}.tar.gz
-Patch1: armnn-fix_boost.patch
+Source0: https://github.com/ARM-software/armnn/archive/v%{version}.tar.gz#/armnn-%{version}.tar.gz
+Source1: armnn-rpmlintrc
 # PATCH: based on http://arago-project.org/git/?p=meta-arago.git;a=blob;f=meta-arago-extras/recipes-support/armnn/armnn/0004-generate-versioned-library.patch;hb=master
-Patch2: armnn-generate-versioned-library.patch
+Patch1: armnn-generate-versioned-library.patch
 # Patch: http://arago-project.org/git/?p=meta-arago.git;a=blob;f=meta-arago-extras/recipes-support/armnn/armnn/0007-enable-use-of-arm-compute-shared-library.patch;hb=master
-Patch3: 0007-enable-use-of-arm-compute-shared-library.patch
+Patch2: 0007-enable-use-of-arm-compute-shared-library.patch
+# PATCH-FIX-UPSTREAM - https://github.com/ARM-software/armnn/issues/274
+Patch3: armnn-fix_boost.patch
+# PATCH-FIX-UPSTREAM - https://github.com/ARM-software/armnn/issues/266
+Patch4: armnn-fix_arm32_dep.patch
+Patch5: armnn-fix_arm32.patch
 # https://github.com/ARM-software/armnn/issues/207
-# FIXME: remove this patch once *.pb.cc files are packaged properly in tensorflow-devel
+# FIXME: remove this patch once *.pb.cc files are packaged properly in tensorflow-devel - https://github.com/ARM-software/armnn/issues/269
 Patch100: armnn-fix_tensorflow_link.patch
 %if 0%{?suse_version} < 1330
 BuildRequires: boost-devel >= 1.59
@@ -102,6 +124,27 @@ BuildRequires: stb-devel
 BuildRequires: tensorflow-devel
 %endif
 BuildRequires: valgrind-devel
+%if %{with compute_cl}
+Recommends: Mesa-libOpenCL
+%endif
+# Make armnn-opencl pulls lib*-opencl, and armnn pulls non opencl libs
+Requires: libarmnn%{version_major}%{?package_suffix} = %{version}
+%if %{with armnn_flatbuffers}
+Requires: libarmnnSerializer%{version_major}%{?package_suffix} = %{version}
+Requires: libarmnnTfLiteParser%{version_major}%{?package_suffix} = %{version}
+%endif
+%if %{with armnn_caffe}
+Requires: libarmnnCaffeParser%{version_major}%{?package_suffix} = %{version}
+%endif
+%if %{with armnn_tf}
+Requires: libarmnnTfParser%{version_major}%{?package_suffix} = %{version}
+%endif
+# Make sure we do not install both openCL and non-openCL (CPU only) versions.
+%if "%{target}" == "opencl"
+Conflicts: armnn
+%else
+Conflicts: armnn-opencl
+%endif
 BuildRoot: %{_tmppath}/%{name}-%{version}-build
 
 %description
@@ -115,12 +158,16 @@ modification across Arm Cortex CPUs and Arm Mali GPUs.
 Summary: Development headers and libraries for armnn
 Group: Development/Libraries/C and C++
 Requires: %{name} = %{version}
-Requires: lib%{name}%{version_major} = %{version}
+Requires: libarmnn%{version_major}%{?package_suffix} = %{version}
 %if %{with armnn_flatbuffers}
-Requires: libarmnnSerializer%{version_major} = %{version}
+Requires: libarmnnSerializer%{version_major}%{?package_suffix} = %{version}
+Requires: libarmnnTfLiteParser%{version_major}%{?package_suffix} = %{version}
 %endif
 %if %{with armnn_caffe}
-Requires: libarmnnCaffeParser%{version_major} = %{version}
+Requires: libarmnnCaffeParser%{version_major}%{?package_suffix} = %{version}
+%endif
+%if %{with armnn_tf}
+Requires: libarmnnTfParser%{version_major}%{?package_suffix} = %{version}
 %endif
 
 %description devel
@@ -132,11 +179,16 @@ modification across Arm Cortex CPUs and Arm Mali GPUs.
 This package contains the development libraries and headers for armnn.
 
-%package -n lib%{name}%{version_major}
-Summary: lib%{name} from armnn
+%package -n libarmnn%{version_major}%{?package_suffix}
+%if "%{target}" == "opencl"
+Conflicts: libarmnn%{version_major}
+%else
+Conflicts: libarmnn%{version_major}-opencl
+%endif
+Summary: libarmnn from armnn
 Group: Development/Libraries/C and C++
 
-%description -n lib%{name}%{version_major}
+%description -n libarmnn%{version_major}%{?package_suffix}
 Arm NN is an inference engine for CPUs, GPUs and NPUs.
 It bridges the gap between existing NN frameworks and the underlying IP.
 It enables efficient translation of existing neural network frameworks,
@@ -146,11 +198,16 @@ modification across Arm Cortex CPUs and Arm Mali GPUs.
 This package contains the libarmnn library from armnn.
 
 %if %{with armnn_flatbuffers}
-%package -n libarmnnSerializer%{version_major}
+%package -n libarmnnSerializer%{version_major}%{?package_suffix}
+%if "%{target}" == "opencl"
+Conflicts: libarmnnSerializer%{version_major}
+%else
+Conflicts: libarmnnSerializer%{version_major}-opencl
+%endif
 Summary: libarmnnSerializer from armnn
 Group: Development/Libraries/C and C++
 
-%description -n libarmnnSerializer%{version_major}
+%description -n libarmnnSerializer%{version_major}%{?package_suffix}
 Arm NN is an inference engine for CPUs, GPUs and NPUs.
 It bridges the gap between existing NN frameworks and the underlying IP.
 It enables efficient translation of existing neural network frameworks,
@@ -159,11 +216,16 @@ modification across Arm Cortex CPUs and Arm Mali GPUs.
 This package contains the libarmnnSerializer library from armnn.
 
-%package -n libarmnnTfLiteParser%{version_major}
+%package -n libarmnnTfLiteParser%{version_major}%{?package_suffix}
+%if "%{target}" == "opencl"
+Conflicts: libarmnnTfLiteParser%{version_major}
+%else
+Conflicts: libarmnnTfLiteParser%{version_major}-opencl
+%endif
 Summary: libarmnnTfLiteParser from armnn
 Group: Development/Libraries/C and C++
 
-%description -n libarmnnTfLiteParser%{version_major}
+%description -n libarmnnTfLiteParser%{version_major}%{?package_suffix}
 Arm NN is an inference engine for CPUs, GPUs and NPUs.
 It bridges the gap between existing NN frameworks and the underlying IP.
 It enables efficient translation of existing neural network frameworks,
@@ -174,11 +236,16 @@ This package contains the libarmnnTfLiteParser library from armnn.
 %endif
 
 %if %{with armnn_tf}
-%package -n libarmnnTfParser%{version_major}
+%package -n libarmnnTfParser%{version_major}%{?package_suffix}
+%if "%{target}" == "opencl"
+Conflicts: libarmnnTfParser%{version_major}
+%else
+Conflicts: libarmnnTfParser%{version_major}-opencl
+%endif
 Summary: libarmnnTfParser from armnn
 Group: Development/Libraries/C and C++
 
-%description -n libarmnnTfParser%{version_major}
+%description -n libarmnnTfParser%{version_major}%{?package_suffix}
 Arm NN is an inference engine for CPUs, GPUs and NPUs.
 It bridges the gap between existing NN frameworks and the underlying IP.
 It enables efficient translation of existing neural network frameworks,
@@ -189,11 +256,16 @@ This package contains the libarmnnTfParser library from armnn.
 %endif
 
 %if %{with armnn_caffe}
-%package -n libarmnnCaffeParser%{version_major}
+%package -n libarmnnCaffeParser%{version_major}%{?package_suffix}
+%if "%{target}" == "opencl"
+Conflicts: libarmnnCaffeParser%{version_major}
+%else
+Conflicts: libarmnnCaffeParser%{version_major}-opencl
+%endif
 Summary: libarmnnCaffeParser from armnn
 Group: Development/Libraries/C and C++
 
-%description -n libarmnnCaffeParser%{version_major}
+%description -n libarmnnCaffeParser%{version_major}%{?package_suffix}
 Arm NN is an inference engine for CPUs, GPUs and NPUs.
 It bridges the gap between existing NN frameworks and the underlying IP.
 It enables efficient translation of existing neural network frameworks,
@@ -204,10 +276,12 @@ This package contains the libarmnnCaffeParser library from armnn.
 %endif
 
 %prep
-%setup -q
+%setup -q -n armnn-%{version}
 %patch1 -p1
 %patch2 -p1
 %patch3 -p1
+%patch4 -p1
+%patch5 -p1
 %patch100 -p1
 # Boost fixes for dynamic linking
 sed -i 's/add_definitions("-DBOOST_ALL_NO_LIB")/add_definitions("-DBOOST_ALL_DYN_LINK")/' ./cmake/GlobalConfig.cmake
@@ -305,32 +379,33 @@ find ./build/tests -maxdepth 1 -type f -executable -exec cp $CP_ARGS {} %{buildr
 cp $CP_ARGS ./build/samples/SimpleSample %{buildroot}%{_bindir}
 %endif
 
-%if %{with armnn_tests}
+# openCL UnitTests are failing in OBS due to the lack of openCL device
+%if %{without compute_cl} && %{with armnn_tests}
 %check
 # Run tests
 LD_LIBRARY_PATH="$(pwd)/build/" \
 ./build/UnitTests
 %endif
 
-%post -n lib%{name}%{version_major} -p /sbin/ldconfig
-%postun -n lib%{name}%{version_major} -p /sbin/ldconfig
+%post -n libarmnn%{version_major}%{?package_suffix} -p /sbin/ldconfig
+%postun -n libarmnn%{version_major}%{?package_suffix} -p /sbin/ldconfig
 
 %if %{with armnn_flatbuffers}
-%post -n libarmnnSerializer%{version_major} -p /sbin/ldconfig
-%postun -n libarmnnSerializer%{version_major} -p /sbin/ldconfig
+%post -n libarmnnSerializer%{version_major}%{?package_suffix} -p /sbin/ldconfig
+%postun -n libarmnnSerializer%{version_major}%{?package_suffix} -p /sbin/ldconfig
 
-%post -n libarmnnTfLiteParser%{version_major} -p /sbin/ldconfig
-%postun -n libarmnnTfLiteParser%{version_major} -p /sbin/ldconfig
+%post -n libarmnnTfLiteParser%{version_major}%{?package_suffix} -p /sbin/ldconfig
+%postun -n libarmnnTfLiteParser%{version_major}%{?package_suffix} -p /sbin/ldconfig
 %endif
 
 %if %{with armnn_tf}
-%post -n libarmnnTfParser%{version_major} -p /sbin/ldconfig
-%postun -n libarmnnTfParser%{version_major} -p /sbin/ldconfig
+%post -n libarmnnTfParser%{version_major}%{?package_suffix} -p /sbin/ldconfig
+%postun -n libarmnnTfParser%{version_major}%{?package_suffix} -p /sbin/ldconfig
 %endif
 
 %if %{with armnn_caffe}
-%post -n libarmnnCaffeParser%{version_major} -p /sbin/ldconfig
-%postun -n libarmnnCaffeParser%{version_major} -p /sbin/ldconfig
+%post -n libarmnnCaffeParser%{version_major}%{?package_suffix} -p /sbin/ldconfig
+%postun -n libarmnnCaffeParser%{version_major}%{?package_suffix} -p /sbin/ldconfig
 %endif
@@ -338,6 +413,12 @@ LD_LIBRARY_PATH="$(pwd)/build/" \
 %defattr(-,root,root)
 %doc README.md
 %license LICENSE
+%if %{with armnn_tests}
+%{_bindir}/ExecuteNetwork
+%if %{with armnn_caffe}
+%{_bindir}/Caffe*-Armnn
+%{_bindir}/MultipleNetworksCifar10
+%endif
 %if %{with armnn_flatbuffers}
 %{_bindir}/TfLite*-Armnn
 %{_bindir}/Image*Generator
@@ -345,35 +426,29 @@ LD_LIBRARY_PATH="$(pwd)/build/" \
 %if %{with armnn_tf}
 %{_bindir}/Tf*-Armnn
 %endif
-%if %{with armnn_tests}
-%{_bindir}/ExecuteNetwork
-%if %{with armnn_caffe}
-%{_bindir}/Caffe*-Armnn
-%{_bindir}/MultipleNetworksCifar10
-%endif
-%endif
 %if %{with armnn_flatbuffers}
 %{_bindir}/SimpleSample
 %endif
+%endif
 
-%files -n lib%{name}%{version_major}
-%{_libdir}/lib%{name}.so.*
+%files -n libarmnn%{version_major}%{?package_suffix}
+%{_libdir}/libarmnn.so.*
 
 %if %{with armnn_flatbuffers}
-%files -n libarmnnSerializer%{version_major}
+%files -n libarmnnSerializer%{version_major}%{?package_suffix}
 %{_libdir}/libarmnnSerializer.so.*
 
-%files -n libarmnnTfLiteParser%{version_major}
+%files -n libarmnnTfLiteParser%{version_major}%{?package_suffix}
 %{_libdir}/libarmnnTfLiteParser.so.*
 %endif
 
 %if %{with armnn_tf}
-%files -n libarmnnTfParser%{version_major}
+%files -n libarmnnTfParser%{version_major}%{?package_suffix}
 %{_libdir}/libarmnnTfParser.so.*
 %endif
 
 %if %{with armnn_caffe}
-%files -n libarmnnCaffeParser%{version_major}
+%files -n libarmnnCaffeParser%{version_major}%{?package_suffix}
 %{_libdir}/libarmnnCaffeParser.so.*
 %endif