Guillaume GARDET 2022-10-14 12:37:56 +00:00 committed by Git OBS Bridge
parent 8a6803a6fe
commit c559658e25
12 changed files with 532 additions and 799 deletions


@@ -3,6 +3,8 @@ From: Qin Su <qsu@ti.com>
Date: Fri, 22 Feb 2019 14:10:07 -0500
Subject: [PATCH] add more test command line arguments
Updated by Guillaume_G to apply properly (s/BOOST_ASSERT/ARMNN_ASSERT/)
Upstream-Status: Inappropriate [TI only test code]
Signed-off-by: Qin Su <qsu@ti.com>
---
@@ -14,9 +16,9 @@ index 538720b..6fd21b8 100644
--- a/tests/InferenceTest.inl
+++ b/tests/InferenceTest.inl
@@ -326,6 +326,55 @@ int ClassifierInferenceTestMain(int argc,
BOOST_ASSERT(modelFilename);
BOOST_ASSERT(inputBindingName);
BOOST_ASSERT(outputBindingName);
ARMNN_ASSERT(modelFilename);
ARMNN_ASSERT(inputBindingName);
ARMNN_ASSERT(outputBindingName);
+ int count;
+ const char *p_input;
+ char inmodelname[500];
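The hunk is truncated here; the declarations feed the extra argument parsing this patch adds. Purely as a hypothetical sketch of the pattern (only count, p_input and inmodelname come from the hunk above; the --inmodel flag and the loop are assumptions for illustration):

#include <cstdio>
#include <cstring>

int main(int argc, char* argv[])
{
    char inmodelname[500] = {0};                    // buffer from the hunk above
    for (int count = 1; count + 1 < argc; ++count)
    {
        const char* p_input = argv[count];          // pointer from the hunk above
        if (std::strcmp(p_input, "--inmodel") == 0) // hypothetical flag name
        {
            std::snprintf(inmodelname, sizeof(inmodelname), "%s", argv[count + 1]);
        }
    }
    std::printf("model file: %s\n", inmodelname);
    return 0;
}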


@@ -6,6 +6,8 @@ Subject: [PATCH] add armnn mobilenet test example
Upstream-Status: Inappropriate [TI only test code]
Signed-off-by: Qin Su <qsu@ti.com>
Signed-off-by: Djordje Senicic <x0157990@ti.com>
[Guillaume's update: Add boost_log dep]
[Guillaume's update: Update to apply on top of 20.08]
---
tests/CMakeLists.txt | 41 +++++++++++++++++++++++++++++++++++++++++
1 file changed, 41 insertions(+)
@@ -21,8 +23,8 @@ index dfcf4b48..5a78d3a6 100644
# UnitTests
include(CheckIncludeFiles)
@@ -348,3 +351,41 @@ if(BUILD_ARMNN_QUANTIZER)
add_executable_ex(ImageCSVFileGenerator ${ImageCSVFileGenerator_sources})
@@ -348,3 +351,42 @@ if(BUILD_ARMNN_QUANTIZER)
target_include_directories(ImageCSVFileGenerator PRIVATE ../src/armnnUtils)
ImageTensorExecutor(ImageCSVFileGenerator)
endif()
+
@@ -57,6 +59,7 @@ index dfcf4b48..5a78d3a6 100644
+ endif()
+
+ target_link_libraries(ArmnnExamples
+ ${Boost_LOG_LIBRARY}
+ ${Boost_SYSTEM_LIBRARY}
+ ${Boost_FILESYSTEM_LIBRARY}
+ ${Boost_PROGRAM_OPTIONS_LIBRARY}


@@ -5,6 +5,11 @@ Subject: [PATCH] armnn mobilenet test example
Upstream-Status: Inappropriate [TI only test code]
Signed-off-by: Qin Su <qsu@ti.com>
[Guillaume's update: s#Logging.hpp#armnn/Logging.hpp#]
[Guillaume's update: Add #include <boost/log/trivial.hpp>]
[Guillaume's update: Drop armnnUtils::ConfigureLogging(...)]
[Guillaume's update: Handle boost::variant to mapbox::util::variant update]
---
tests/ArmnnExamples/ArmnnExamples.cpp | 654 ++++++++++++++++++++++++++++++++++
1 file changed, 654 insertions(+)
@@ -47,6 +52,7 @@ index 0000000..53a11cc
+// See LICENSE file in the project root for full license information.
+//
+#include <armnn/ArmNN.hpp>
+#include <boost/log/trivial.hpp>
+
+#include <utility>
+#include <armnn/TypesUtils.hpp>
@@ -63,9 +69,9 @@ index 0000000..53a11cc
+#if defined(ARMNN_ONNX_PARSER)
+#include "armnnOnnxParser/IOnnxParser.hpp"
+#endif
+#include "CsvReader.hpp"
+#include <mapbox/variant.hpp> /*#include "CsvReader.hpp"*/
+#include "../InferenceTest.hpp"
+#include <Logging.hpp>
+#include <armnn/Logging.hpp>
+#include <Profiling.hpp>
+
+#include <boost/algorithm/string/trim.hpp>
@@ -312,7 +318,7 @@ index 0000000..53a11cc
+ // Loads input tensor.
+ std::vector<uint8_t> input;
+ std::vector<float> input_resized;
+ using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
+ using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
+
+ try
+ {
@@ -392,7 +398,7 @@ index 0000000..53a11cc
+ double preformance_ret = static_cast<double>(1.0/timeTakenS);
+
+ //retrieve output
+ std::vector<float>& outputData = (boost::get<std::vector<float>>(outputDataContainers[0]));
+ std::vector<float>& outputData = (mapbox::util::get<std::vector<float>>(outputDataContainers[0]));
+ //output TOP predictions
+ std::string predict_target_name;
+ // find the out with the highest confidence
@@ -583,7 +589,6 @@ index 0000000..53a11cc
+ armnn::LogSeverity level = armnn::LogSeverity::Debug;
+#endif
+ armnn::ConfigureLogging(true, true, level);
+ armnnUtils::ConfigureLogging(boost::log::core::get().get(), true, true, level);
+
+ std::string testCasesFile;
+
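The boost::variant to mapbox::util::variant changes above follow one mechanical pattern across the whole file: the variant alias switches namespaces and boost::get becomes mapbox::util::get. A minimal self-contained sketch of that pattern (plain mapbox/variant usage, nothing armnn-specific):

#include <mapbox/variant.hpp>
#include <vector>

using TContainer = mapbox::util::variant<std::vector<float>,
                                         std::vector<int>,
                                         std::vector<unsigned char>>;

int main()
{
    TContainer output = std::vector<float>{0.1f, 0.7f, 0.2f};
    // Previously: boost::get<std::vector<float>>(output)
    std::vector<float>& scores = mapbox::util::get<std::vector<float>>(output);
    return scores.size() == 3 ? 0 : 1;
}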


@@ -1,31 +0,0 @@
From 38e8e4bc03a4c1ee801f6af50be94ccd971bf3eb Mon Sep 17 00:00:00 2001
From: Qin Su <qsu@ti.com>
Date: Tue, 27 Nov 2018 18:15:49 -0500
Subject: [PATCH] enable use of arm compute shared library
Upstream-Status: Inappropriate [configuration]
Signed-off-by: Qin Su <qsu@ti.com>
---
cmake/GlobalConfig.cmake | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/cmake/GlobalConfig.cmake b/cmake/GlobalConfig.cmake
index 491d87a..4cf40a2 100644
--- a/cmake/GlobalConfig.cmake
+++ b/cmake/GlobalConfig.cmake
@@ -285,6 +285,11 @@ if(ARMCOMPUTENEON OR ARMCOMPUTECL)
find_library(ARMCOMPUTE_CORE_LIBRARY_DEBUG NAMES arm_compute_core-static)
find_library(ARMCOMPUTE_CORE_LIBRARY_RELEASE NAMES arm_compute_core-static)
+ find_library(ARMCOMPUTE_LIBRARY_DEBUG NAMES arm_compute)
+ find_library(ARMCOMPUTE_LIBRARY_RELEASE NAMES arm_compute)
+ find_library(ARMCOMPUTE_CORE_LIBRARY_DEBUG NAMES arm_compute_core)
+ find_library(ARMCOMPUTE_CORE_LIBRARY_RELEASE NAMES arm_compute_core)
+
set(ARMCOMPUTE_LIBRARIES
debug ${ARMCOMPUTE_LIBRARY_DEBUG} ${ARMCOMPUTE_CORE_LIBRARY_DEBUG}
optimized ${ARMCOMPUTE_LIBRARY_RELEASE} ${ARMCOMPUTE_CORE_LIBRARY_RELEASE} )
--
1.9.1


@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ece7270a661fc38e913b44289269af92f208daf387e8de3f68684783e1e83a71
size 1095965

armnn-22.02.tar.gz Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b11fe8e4af3a178a1fd7405950e444e1358057921b97c864b91fcadd6ae41716
size 27958237


@@ -1,79 +0,0 @@
From d9f7c8ba3949823a623b407f4bd80d120ca0b5be Mon Sep 17 00:00:00 2001
From: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Date: Fri, 13 Sep 2019 13:37:03 +0100
Subject: [PATCH] IVGCVSW-3858 Fix RefTensorHandleTests on Raspberry Pi
* Fix alignment check to use sizeof(size_t) instead of a hard-coded value
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: I092c4464c6cecb2403da9b7744b68ad063ddbad1
---
src/backends/backendsCommon/test/EndToEndTestImpl.hpp | 6 ++----
src/backends/reference/RefTensorHandle.cpp | 5 +++--
src/backends/reference/test/RefTensorHandleTests.cpp | 10 ++++++----
3 files changed, 11 insertions(+), 10 deletions(-)
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index 8a3e44fc..040782bf 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -210,14 +210,12 @@ inline void ImportNonAlignedPointerTest(std::vector<BackendId> backends)
};
// Misaligned input
- float * misalignedInputData = inputData.data();
- misalignedInputData++;
+ float* misalignedInputData = reinterpret_cast<float*>(reinterpret_cast<char*>(inputData.data()) + 1);
std::vector<float> outputData(5);
// Misaligned output
- float * misalignedOutputData = outputData.data();
- misalignedOutputData++;
+ float* misalignedOutputData = reinterpret_cast<float*>(reinterpret_cast<char*>(outputData.data()) + 1);
InputTensors inputTensors
{
diff --git a/src/backends/reference/RefTensorHandle.cpp b/src/backends/reference/RefTensorHandle.cpp
index 42ac7f08..84a74edc 100644
--- a/src/backends/reference/RefTensorHandle.cpp
+++ b/src/backends/reference/RefTensorHandle.cpp
@@ -110,8 +110,9 @@ bool RefTensorHandle::Import(void* memory, MemorySource source)
{
if (source == MemorySource::Malloc)
{
- // Checks the 16 byte memory alignment.
- if (reinterpret_cast<uint64_t>(memory) % 16)
+ // Check memory alignment
+ constexpr uintptr_t alignment = sizeof(size_t);
+ if (reinterpret_cast<uintptr_t>(memory) % alignment)
{
if (m_Imported)
{
diff --git a/src/backends/reference/test/RefTensorHandleTests.cpp b/src/backends/reference/test/RefTensorHandleTests.cpp
index 2c5d6d49..be229bf8 100644
--- a/src/backends/reference/test/RefTensorHandleTests.cpp
+++ b/src/backends/reference/test/RefTensorHandleTests.cpp
@@ -92,15 +92,17 @@ BOOST_AUTO_TEST_CASE(MisalignedPointer)
TensorInfo info({2}, DataType::Float32);
RefTensorHandle handle(info, memoryManager, static_cast<unsigned int>(MemorySource::Malloc));
- // Allocates a 2 int array
+ // Allocate a 2 int array
int* testPtr = new int[2];
- int* misalignedPtr = testPtr + 1;
- BOOST_CHECK(!handle.Import(static_cast<void *>(misalignedPtr), MemorySource::Malloc));
+ // Increment pointer by 1 byte
+ void* misalignedPtr = static_cast<void*>(reinterpret_cast<char*>(testPtr) + 1);
+
+ BOOST_CHECK(!handle.Import(misalignedPtr, MemorySource::Malloc));
delete[] testPtr;
}
#endif
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+BOOST_AUTO_TEST_SUITE_END()
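Both hunks rest on the same two ideas: the alignment test is done on the integer value of the pointer, and a guaranteed-misaligned pointer is built by offsetting through char*. The old typed-pointer increment advanced a whole element (4 bytes), which on 32-bit targets such as the Raspberry Pi still satisfies the sizeof(size_t) alignment, so the test never exercised the failure path. A standalone sketch of the technique, using nothing beyond standard C++:

#include <cstddef>
#include <cstdint>

// The check introduced above: the address must be a multiple of
// sizeof(size_t) (4 on 32-bit targets, 8 on 64-bit targets).
bool IsAligned(const void* memory)
{
    constexpr std::uintptr_t alignment = sizeof(std::size_t);
    return reinterpret_cast<std::uintptr_t>(memory) % alignment == 0;
}

int main()
{
    int buffer[2] = {0, 0};
    // A one-byte offset through char* is misaligned for any alignment > 1.
    void* misaligned = reinterpret_cast<char*>(buffer) + 1;
    return (IsAligned(buffer) && !IsAligned(misaligned)) ? 0 : 1;
}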


@@ -1,284 +0,0 @@
From dcaa6109c95034aa3b945acd50a2882e40f13370 Mon Sep 17 00:00:00 2001
From: Ferran Balaguer <ferran.balaguer@arm.com>
Date: Wed, 21 Aug 2019 13:28:38 +0100
Subject: [PATCH] IVGCVSW-3175 Add Regression Tests for Zero Copy
Signed-off-by: Ferran Balaguer <ferran.balaguer@arm.com>
Change-Id: I6f16ea0dca359283a3b187e2f046f82a7dc2ff7c
---
.../backendsCommon/test/EndToEndTestImpl.hpp | 153 ++++++++++++++++++
.../reference/test/RefEndToEndTests.cpp | 86 ++--------
2 files changed, 167 insertions(+), 72 deletions(-)
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index f8673d69..8a3e44fc 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -8,6 +8,7 @@
#include <armnn/ArmNN.hpp>
#include <armnn/INetwork.hpp>
+#include <Profiling.hpp>
#include <backendsCommon/test/QuantizeHelper.hpp>
@@ -171,4 +172,156 @@ void EndToEndLayerTestImpl(INetworkPtr network,
}
}
+inline void ImportNonAlignedPointerTest(std::vector<BackendId> backends)
+{
+ using namespace armnn;
+
+ // Create runtime in which test will run
+ IRuntime::CreationOptions options;
+ IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+ // build up the structure of the network
+ INetworkPtr net(INetwork::Create());
+
+ IConnectableLayer* input = net->AddInputLayer(0);
+
+ NormalizationDescriptor descriptor;
+ IConnectableLayer* norm = net->AddNormalizationLayer(descriptor);
+
+ IConnectableLayer* output = net->AddOutputLayer(0);
+
+ input->GetOutputSlot(0).Connect(norm->GetInputSlot(0));
+ norm->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+ input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
+ norm->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
+
+ // Optimize the network
+ IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+
+ // Loads it into the runtime.
+ NetworkId netId;
+ runtime->LoadNetwork(netId, std::move(optNet));
+
+ // Creates structures for input & output
+ std::vector<float> inputData
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f
+ };
+
+ // Misaligned input
+ float * misalignedInputData = inputData.data();
+ misalignedInputData++;
+
+ std::vector<float> outputData(5);
+
+ // Misaligned output
+ float * misalignedOutputData = outputData.data();
+ misalignedOutputData++;
+
+ InputTensors inputTensors
+ {
+ {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), misalignedInputData)},
+ };
+ OutputTensors outputTensors
+ {
+ {0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), misalignedOutputData)}
+ };
+
+ // The result of the inference is not important, just the fact that there
+ // should not be CopyMemGeneric workloads.
+ runtime->GetProfiler(netId)->EnableProfiling(true);
+
+ // Do the inference
+ runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
+
+ // Retrieve the Profiler.Print() output to get the workload execution
+ ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
+ std::stringstream ss;
+ profilerManager.GetProfiler()->Print(ss);;
+ std::string dump = ss.str();
+
+ // Contains RefNormalizationWorkload
+ std::size_t found = dump.find("RefNormalizationWorkload");
+ BOOST_TEST(found != std::string::npos);
+ // No Contains SyncMemGeneric (Created when importing the output tensor handle)
+ found = dump.find("SyncMemGeneric");
+ BOOST_TEST(found == std::string::npos);
+ // Contains CopyMemGeneric
+ found = dump.find("CopyMemGeneric");
+ BOOST_TEST(found != std::string::npos);
+}
+
+inline void ImportAlignedPointerTest(std::vector<BackendId> backends)
+{
+ using namespace armnn;
+
+ // Create runtime in which test will run
+ IRuntime::CreationOptions options;
+ IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+ // build up the structure of the network
+ INetworkPtr net(INetwork::Create());
+
+ IConnectableLayer* input = net->AddInputLayer(0);
+
+ NormalizationDescriptor descriptor;
+ IConnectableLayer* norm = net->AddNormalizationLayer(descriptor);
+
+ IConnectableLayer* output = net->AddOutputLayer(0);
+
+ input->GetOutputSlot(0).Connect(norm->GetInputSlot(0));
+ norm->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+ input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
+ norm->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
+
+ // Optimize the network
+ IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+
+ // Loads it into the runtime.
+ NetworkId netId;
+ runtime->LoadNetwork(netId, std::move(optNet));
+
+ // Creates structures for input & output
+ std::vector<float> inputData
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f
+ };
+
+ std::vector<float> outputData(4);
+
+ InputTensors inputTensors
+ {
+ {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())},
+ };
+ OutputTensors outputTensors
+ {
+ {0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
+ };
+
+ // The result of the inference is not important, just the fact that there
+ // should not be CopyMemGeneric workloads.
+ runtime->GetProfiler(netId)->EnableProfiling(true);
+
+ // Do the inference
+ runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
+
+ // Retrieve the Profiler.Print() output to get the workload execution
+ ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
+ std::stringstream ss;
+ profilerManager.GetProfiler()->Print(ss);;
+ std::string dump = ss.str();
+
+ // Contains RefNormalizationWorkload
+ std::size_t found = dump.find("RefNormalizationWorkload");
+ BOOST_TEST(found != std::string::npos);
+ // Contains SyncMemGeneric
+ found = dump.find("SyncMemGeneric");
+ BOOST_TEST(found != std::string::npos);
+ // No contains CopyMemGeneric
+ found = dump.find("CopyMemGeneric");
+ BOOST_TEST(found == std::string::npos);
+}
+
} // anonymous namespace
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 31e9b339..ee42c9e9 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -322,78 +322,6 @@ BOOST_AUTO_TEST_CASE(TrivialMin)
BOOST_TEST(outputData[3] == 2);
}
-BOOST_AUTO_TEST_CASE(RefNoCopyWorkloads)
-{
- using namespace armnn;
-
- // Create runtime in which test will run
- IRuntime::CreationOptions options;
- IRuntimePtr runtime(armnn::IRuntime::Create(options));
-
- // build up the structure of the network
- INetworkPtr net(INetwork::Create());
-
- IConnectableLayer* input = net->AddInputLayer(0);
-
- NormalizationDescriptor descriptor;
- IConnectableLayer* norm = net->AddNormalizationLayer(descriptor);
-
- IConnectableLayer* output = net->AddOutputLayer(0);
-
- input->GetOutputSlot(0).Connect(norm->GetInputSlot(0));
- norm->GetOutputSlot(0).Connect(output->GetInputSlot(0));
-
- input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
- norm->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
-
- // Optimize the network
- IOptimizedNetworkPtr optNet = Optimize(*net, defaultBackends, runtime->GetDeviceSpec());
-
- // Loads it into the runtime.
- NetworkId netId;
- runtime->LoadNetwork(netId, std::move(optNet));
-
- // Creates structures for input & output
- std::vector<float> inputData
- {
- 1.0f, 2.0f, 3.0f, 4.0f
- };
-
- std::vector<float> outputData(4);
-
- InputTensors inputTensors
- {
- {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())},
- };
- OutputTensors outputTensors
- {
- {0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
- };
-
- // The result of the inference is not important, just the fact that there
- // should not be CopyMemGeneric workloads.
- runtime->GetProfiler(netId)->EnableProfiling(true);
-
- // Do the inference
- runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
-
- // Retrieve the Profiler.Print() output to get the workload execution
- ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
- std::stringstream ss;
- profilerManager.GetProfiler()->Print(ss);;
- std::string dump = ss.str();
-
- // Contains RefNormalizationWorkload
- std::size_t found = dump.find("RefNormalizationWorkload");
- BOOST_TEST(found != std::string::npos);
- // Contains SyncMemGeneric
- found = dump.find("SyncMemGeneric");
- BOOST_TEST(found != std::string::npos);
- // No contains CopyMemGeneric
- found = dump.find("CopyMemGeneric");
- BOOST_TEST(found == std::string::npos);
-}
-
BOOST_AUTO_TEST_CASE(RefEqualSimpleEndToEndTest)
{
const std::vector<uint8_t> expectedOutput({ 1, 1, 1, 1, 0, 0, 0, 0,
@@ -1023,4 +951,18 @@ BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndInt16NhwcTest)
ResizeNearestNeighborEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NHWC);
}
+#if !defined(__ANDROID__)
+// Only run these tests on non Android platforms
+BOOST_AUTO_TEST_CASE(RefImportNonAlignedPointerTest)
+{
+ ImportNonAlignedPointerTest(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefImportAlignedPointerTest)
+{
+ ImportAlignedPointerTest(defaultBackends);
+}
+
+#endif
+
BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
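The assertion pattern used throughout these tests (enable profiling, run the inference, print the profiler into a stringstream, then search the dump for workload names) can be factored into a small helper. A sketch built only from the armnn calls already visible above:

#include <sstream>
#include <string>

#include <Profiling.hpp> // as included by the patch above

// Returns true if the given workload name appears in the profiler dump.
// SyncMemGeneric indicates an imported (zero-copy) tensor handle;
// CopyMemGeneric indicates a fallback copy.
bool ProfilerDumpContains(const std::string& workloadName)
{
    std::stringstream ss;
    armnn::ProfilerManager::GetInstance().GetProfiler()->Print(ss);
    return ss.str().find(workloadName) != std::string::npos;
}

// Usage inside a test, mirroring the checks above:
//   BOOST_TEST(ProfilerDumpContains("SyncMemGeneric"));
//   BOOST_TEST(!ProfilerDumpContains("CopyMemGeneric"));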


@@ -1,10 +0,0 @@
--- armnn-19.02.orig/src/armnn/test/TensorHelpers.hpp 2019-03-26 16:08:56.403605790 +0100
+++ armnn-19.02/src/armnn/test/TensorHelpers.hpp 2019-03-26 16:09:58.448265384 +0100
@@ -13,6 +13,7 @@
#include <boost/assert.hpp>
#include <boost/test/tools/floating_point_comparison.hpp>
#include <boost/random/uniform_real_distribution.hpp>
+#define BOOST_ALLOW_DEPRECATED_HEADERS
#include <boost/random/mersenne_twister.hpp>
#include <boost/numeric/conversion/cast.hpp>
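For context, BOOST_ALLOW_DEPRECATED_HEADERS only suppresses the deprecation warning for headers included after it, which is why this (since-dropped) patch inserts the define immediately before the mersenne_twister include. A minimal illustration, plain Boost with nothing armnn-specific:

// The define must precede the deprecated include to have any effect.
#define BOOST_ALLOW_DEPRECATED_HEADERS
#include <boost/random/mersenne_twister.hpp>
#include <boost/random/uniform_real_distribution.hpp>

int main()
{
    boost::random::mt19937 rng(42);                        // fixed seed
    boost::random::uniform_real_distribution<float> dist(0.0f, 1.0f);
    return dist(rng) < 1.0f ? 0 : 1;                       // always returns 0
}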


@@ -1,119 +0,0 @@
diff --git a/CMakeLists.txt b/CMakeLists.txt
index fc68f3af..3616ae8e 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -7,6 +7,7 @@ project(armnn)
set(additional_cmake_files)
list(APPEND additional_cmake_files
+ cmake/armnnVersion.cmake
cmake/Utils.cmake
cmake/GlobalConfig.cmake
cmake/AddDllCopyCommands.cmake)
@@ -15,6 +16,21 @@ foreach(cmake_file ${additional_cmake_files})
include(${cmake_file})
endforeach()
+
+# Define LIB version
+set(GENERIC_LIB_VERSION "${ARMNN_MAJOR_VERSION}.${ARMNN_MINOR_VERSION}")
+# Define LIB soversion
+set(GENERIC_LIB_SOVERSION "${ARMNN_MAJOR_VERSION}")
+# Define _ARMNN_VERSION string to be used in include/armnn/Version.hpp for ARMNN_VERSION string
+set(_ARMNN_VERSION
+ "20${ARMNN_MAJOR_VERSION}${ARMNN_MINOR_VERSION}${ARMNN_BUILD_VERSION}")
+if(${CMAKE_VERSION} VERSION_LESS "3.12.0")
+ add_definitions(-D_ARMNN_VERSION=${_ARMNN_VERSION})
+else()
+ add_compile_definitions(_ARMNN_VERSION=${_ARMNN_VERSION})
+endif()
+
+
if (DYNAMIC_BACKEND_PATHS)
# It's expected to have the format: DYNAMIC_BACKEND_PATHS="PATH_1:PATH_2...:PATH_N"
add_definitions('-DDYNAMIC_BACKEND_PATHS="${DYNAMIC_BACKEND_PATHS}"')
@@ -95,6 +111,7 @@ if(BUILD_CAFFE_PARSER)
target_link_libraries(armnnCaffeParser armnn)
target_link_libraries(armnnCaffeParser ${PROTOBUF_LIBRARIES})
+ set_target_properties(armnnCaffeParser PROPERTIES VERSION ${GENERIC_LIB_VERSION} SOVERSION ${GENERIC_LIB_SOVERSION} )
endif()
@@ -119,6 +136,7 @@ if(BUILD_ONNX_PARSER)
# Protobuf
target_link_libraries(armnnOnnxParser ${PROTOBUF_LIBRARIES})
+ set_target_properties(armnnOnnxParser PROPERTIES VERSION ${GENERIC_LIB_VERSION} SOVERSION ${GENERIC_LIB_SOVERSION} )
endif()
if(BUILD_TF_PARSER)
@@ -142,6 +160,7 @@ if(BUILD_TF_PARSER)
# Protobuf (use the specific version tensorflow wants)
target_link_libraries(armnnTfParser ${PROTOBUF_LIBRARIES})
+ set_target_properties(armnnTfParser PROPERTIES VERSION ${GENERIC_LIB_VERSION} SOVERSION ${GENERIC_LIB_SOVERSION} )
endif()
if(BUILD_ARMNN_QUANTIZER AND ARMNNREF)
@@ -200,6 +219,8 @@ if(BUILD_ARMNN_QUANTIZER AND ARMNNREF)
target_link_libraries(ArmnnQuantizer pthread)
endif()
+ set_target_properties(ArmnnQuantizer PROPERTIES VERSION ${GENERIC_LIB_VERSION} SOVERSION ${GENERIC_LIB_SOVERSION} )
+
endif()
@@ -539,6 +560,7 @@ endif()
if(PROFILING_BACKEND_STREAMLINE)
target_link_libraries(armnn pthread)
endif()
+set_target_properties( armnn PROPERTIES VERSION ${GENERIC_LIB_VERSION} SOVERSION ${GENERIC_LIB_SOVERSION} )
if(BUILD_UNIT_TESTS)
set(unittest_sources)
diff --git a/cmake/armnnVersion.cmake b/cmake/armnnVersion.cmake
new file mode 100644
index 00000000..68783370
--- /dev/null
+++ b/cmake/armnnVersion.cmake
@@ -0,0 +1,4 @@
+# ArmNN version number components.
+set(ARMNN_MAJOR_VERSION 19)
+set(ARMNN_MINOR_VERSION 08)
+set(ARMNN_BUILD_VERSION 00)
diff --git a/include/armnn/Version.hpp b/include/armnn/Version.hpp
index 9d73cd54..735be6b9 100644
--- a/include/armnn/Version.hpp
+++ b/include/armnn/Version.hpp
@@ -9,4 +9,5 @@
// YYYY = 4-digit year number
// MM = 2-digit month number
// PP = 2-digit patch number
-#define ARMNN_VERSION "20190800"
+// Defined in CMakeLists.txt
+#define ARMNN_VERSION _ARMNN_VERSION
diff --git a/src/armnnSerializer/CMakeLists.txt b/src/armnnSerializer/CMakeLists.txt
index 225999bb..ccc924e6 100755
--- a/src/armnnSerializer/CMakeLists.txt
+++ b/src/armnnSerializer/CMakeLists.txt
@@ -43,4 +43,5 @@ if(BUILD_ARMNN_SERIALIZER)
install(TARGETS armnnSerializer
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR})
+ set_target_properties(armnnSerializer PROPERTIES VERSION ${GENERIC_LIB_VERSION} SOVERSION ${GENERIC_LIB_SOVERSION} )
endif()
diff --git a/src/armnnTfLiteParser/CMakeLists.txt b/src/armnnTfLiteParser/CMakeLists.txt
index 17d4cf68..ae60079a 100755
--- a/src/armnnTfLiteParser/CMakeLists.txt
+++ b/src/armnnTfLiteParser/CMakeLists.txt
@@ -21,6 +21,8 @@ if(BUILD_TF_LITE_PARSER)
target_link_libraries(armnnTfLiteParser ${Boost_FILESYSTEM_LIBRARY} ${Boost_THREAD_LIBRARY})
target_link_libraries(armnnTfLiteParser armnn ${FLATBUFFERS_LIBRARY})
+ set_target_properties(armnnTfLiteParser PROPERTIES VERSION ${GENERIC_LIB_VERSION} SOVERSION ${GENERIC_LIB_SOVERSION} )
+
install(TARGETS armnnTfLiteParser
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})


@@ -1,3 +1,274 @@
-------------------------------------------------------------------
Tue Mar 15 13:06:29 UTC 2022 - Guillaume GARDET <guillaume.gardet@opensuse.org>
- Update to 22.02:
* Changelog: https://github.com/ARM-software/armnn/releases/tag/v22.02
* Add libarmnnTestUtils
-------------------------------------------------------------------
Wed Dec 1 10:03:31 UTC 2021 - Guillaume GARDET <guillaume.gardet@opensuse.org>
- Update to 21.11:
* Changelog: https://github.com/ARM-software/armnn/releases/tag/v21.11
* Main changes:
- New capabilities and improved performance
- ABI/API Changes
The following front-end API changes have occurred during the
implementation of 21.11 that users should be aware of before
upgrading. Due to these changes we have bumped our ARMNN_VERSION
to 27.0.0, the Delegate to 25.0.0, and also bumped
our Parsers to 24.3.0 following Semantic Versioning guidelines.
- Drop upstream patches:
* 96beb97.diff
* febc20f.diff
* e118e04.diff
* 0011-update-doctest-for-glibc2.34.patch
-------------------------------------------------------------------
Mon Oct 25 07:21:27 UTC 2021 - Guillaume GARDET <guillaume.gardet@opensuse.org>
- Add upstream patch to fix stringop-overread error:
* e118e04.diff
-------------------------------------------------------------------
Thu Oct 21 15:24:42 UTC 2021 - Guillaume GARDET <guillaume.gardet@opensuse.org>
- Add upstream patch to fix uninitialized var error:
* febc20f.diff
- Remove most -Wno-error* flags which are not needed anymore
-------------------------------------------------------------------
Wed Oct 20 08:41:14 UTC 2021 - Atri Bhattacharya <badshah400@gmail.com>
- Add 0011-update-doctest-for-glibc2.34.patch: Update doctest to
version 2.4.6 to allow Arm NN to build with glibc 2.34; patch
taken from upstream commits 13d2e0d and 4ec6d42 and rebased for
current version.
-------------------------------------------------------------------
Thu Sep 9 08:04:50 UTC 2021 - Guillaume GARDET <guillaume.gardet@opensuse.org>
- Update to 21.08:
* Changelog: https://github.com/ARM-software/armnn/releases/tag/v21.08
* Main changes:
- New capabilities and improved performance:
* Added the ability to import protected DMA buffers and
allow Arm NN to run inferences on data held in protected GPU memory,
as well as providing a custom memory allocator which supports
importing malloc, dma_buf and protected DMA buffers.
* Users with multi-core NPUs have been given the ability to pin
inferences to selected cores, making it possible to balance
parallel workloads across the NPU and increase throughput.
* Boost has been completely removed from the code base, making
Arm NN easier to integrate into other software stacks.
* Added support for non-constant weights and biases on
FullyConnected, which lays the groundwork for supporting more models.
* More operators supported on Arm NN, TfLite Parser, TfLite
Delegate and Android NNAPI driver.
- Drop upstream patch:
* armnn-fix-include.patch
-------------------------------------------------------------------
Fri Jul 16 08:23:43 UTC 2021 - Guillaume GARDET <guillaume.gardet@opensuse.org>
- Add '-Wno-error=stringop-overread' to work around a build error
with GCC11 when openCL is enabled
-------------------------------------------------------------------
Tue Jul 13 08:07:14 UTC 2021 - Guillaume GARDET <guillaume.gardet@opensuse.org>
- Add '-Wno-error=uninitialized -Wno-error=array-bounds' to
work around build issues with GCC11 - https://github.com/ARM-software/armnn/issues/548
-------------------------------------------------------------------
Tue May 25 06:49:01 UTC 2021 - Guillaume GARDET <guillaume.gardet@opensuse.org>
- Fix libarmnnOnnxParser version
-------------------------------------------------------------------
Fri May 21 06:55:48 UTC 2021 - Guillaume GARDET <guillaume.gardet@opensuse.org>
- Update to 21.05:
* Changelog: https://github.com/ARM-software/armnn/releases/tag/v21.05
* Main changes:
- New capabilities to allow users to attain higher performance by:
* Making the Arm NN core thread safe, opening the possibility of
running multiple inferences on the same model in parallel
software threads.
* Allowing graphs on the GPU backend to import their input and
output buffers either from correctly aligned main memory or
from kernel memory exposed as a dma_buf, thus reducing memory
usage and saving the time involved in copying data into and
out of the GPU memory space.
- In addition to this, support was added to allow the MobileBERT
network to be parsed and run.
- Finally, three deprecated components (the TensorFlow parser,
the Caffe parser and the Arm NN Quantizer tool) were removed
- Add patch to fix include path:
* armnn-fix-include.patch
- Disable armnn-extratests as it fails to build with current version
-------------------------------------------------------------------
Mon Mar 29 12:17:08 UTC 2021 - Guillaume GARDET <guillaume.gardet@opensuse.org>
- Update to 21.02:
* Changelog: https://github.com/ARM-software/armnn/releases/tag/v21.02
- Drop upstream patch:
* c5c40fe.diff
-------------------------------------------------------------------
Mon Feb 15 09:48:06 UTC 2021 - Guillaume GARDET <guillaume.gardet@opensuse.org>
- Fix installation of cmake files with an upstream patch:
* c5c40fe.diff
-------------------------------------------------------------------
Mon Jan 25 08:21:39 UTC 2021 - Guillaume GARDET <guillaume.gardet@opensuse.org>
- Backport patch to fix tflite parser when built against
tensorflow 2.4:
* 96beb97.diff
-------------------------------------------------------------------
Wed Dec 9 14:33:19 UTC 2020 - Guillaume GARDET <guillaume.gardet@opensuse.org>
- Update to 20.11:
* Changelog: https://github.com/ARM-software/armnn/releases/tag/v20.11
- Refresh patch to handle boost::variant to mapbox::util::variant update:
* 0006-armnn-mobilenet-test-example.patch
-------------------------------------------------------------------
Fri Sep 4 16:07:09 UTC 2020 - Guillaume GARDET <guillaume.gardet@opensuse.org>
- Add python support, disabled for now as it does not install
any files yet
-------------------------------------------------------------------
Fri Sep 4 14:56:36 UTC 2020 - Guillaume GARDET <guillaume.gardet@opensuse.org>
- Package libtimelineDecoderJson
-------------------------------------------------------------------
Thu Sep 3 11:18:40 UTC 2020 - Guillaume GARDET <guillaume.gardet@opensuse.org>
- Update to 20.08:
* Changelog: https://github.com/ARM-software/armnn/releases/tag/v20.08
- Refresh patch:
* 0005-add-armnn-mobilenet-test-example.patch
- Drop upstream patch:
* armnn-fix-catch.patch
- Disable ONNX on Tumbleweed since there is a compatibility issue
with ONNX 1.7.0, see: https://github.com/ARM-software/armnn/issues/419
-------------------------------------------------------------------
Wed Jun 17 07:19:10 UTC 2020 - Guillaume GARDET <guillaume.gardet@opensuse.org>
- Build only where ComputeLibrary is built:
aarch64, armv7 and x86_64
-------------------------------------------------------------------
Thu Jun 4 09:51:23 UTC 2020 - Guillaume GARDET <guillaume.gardet@opensuse.org>
- Unify Leap/SLE options
- Update to 20.05:
* Changelog: https://github.com/ARM-software/armnn/releases/tag/v20.05
- Drop upstream patch:
* armnn-enable-use-of-arm-compute-shared-library.patch
- Refresh patch:
* 0003-add-more-test-command-line-arguments.patch
- Add new patch to fix build with gcc10:
* armnn-fix-catch.patch
- Package new libtimelineDecode and libarmnnBasePipeServer
-------------------------------------------------------------------
Thu May 28 15:10:11 UTC 2020 - Guillaume GARDET <guillaume.gardet@opensuse.org>
- Apply boost link fixes only for armnn_extra_tests
- Apply patches for armnn_extra_tests only when
armnn_extra_tests is built
-------------------------------------------------------------------
Tue Mar 31 15:23:29 UTC 2020 - Guillaume GARDET <guillaume.gardet@opensuse.org>
- Disable RPATH to fix build on Leap15.2/SLE15SP2
-------------------------------------------------------------------
Tue Mar 24 14:01:29 UTC 2020 - Guillaume GARDET <guillaume.gardet@opensuse.org>
- Add '-Wno-error=deprecated-declarations' to fix build with latest
protobuf 3.11.x (Tumbleweed)
https://github.com/ARM-software/armnn/issues/366
- Use python-onnx package instead of python-onnx-devel, since it
is now a single package.
-------------------------------------------------------------------
Wed Mar 11 08:41:20 UTC 2020 - Guillaume GARDET <guillaume.gardet@opensuse.org>
- Fix build on Leap 15.2
-------------------------------------------------------------------
Mon Mar 9 17:47:42 UTC 2020 - Guillaume GARDET <guillaume.gardet@opensuse.org>
- Update to 20.02:
* Changelog: https://github.com/ARM-software/armnn/releases/tag/v20.02
* Drop upstream patch:
- armnn-fix_include.patch
* Refresh patch:
- 0006-armnn-mobilenet-test-example.patch
* Replace downstream patch:
- 0007-enable-use-of-arm-compute-shared-library.patch
by upstream patch:
- armnn-enable-use-of-arm-compute-shared-library.patch
* Add 'vim' as BuildRequires since the build needs the 'xxd' tool
- Use TensorFlow 2 also on Leap 15.2 / SLE15SP2
-------------------------------------------------------------------
Mon Feb 10 13:01:53 UTC 2020 - Guillaume GARDET <guillaume.gardet@opensuse.org>
- Use Tensorflow 2.x only for Tumbleweed, as TensorFlow2 is not
available in Leap 15.2
- Skip some tests if TensorFlow < 1.14 is used
-------------------------------------------------------------------
Tue Jan 28 12:33:19 UTC 2020 - Guillaume GARDET <guillaume.gardet@opensuse.org>
- Enable ONNX on Leap 15.2
-------------------------------------------------------------------
Tue Jan 28 09:48:55 UTC 2020 - Guillaume GARDET <guillaume.gardet@opensuse.org>
- Use tensorflow2 as armnn 19.11 now requires TensorFlow >= 1.14
-------------------------------------------------------------------
Mon Jan 20 15:21:09 UTC 2020 - Guillaume GARDET <guillaume.gardet@opensuse.org>
- Disable ArmnnConverter and ArmnnQuantizer packaging by default
as it requires libQuantizer.so to be packaged, which is not done yet
-------------------------------------------------------------------
Tue Jan 14 13:07:58 UTC 2020 - Guillaume GARDET <guillaume.gardet@opensuse.org>
- Package ArmnnConverter and ArmnnQuantizer
-------------------------------------------------------------------
Tue Jan 14 11:59:14 UTC 2020 - Dominique Leuenberger <dimstar@opensuse.org>
- ExcludeArch %ix86: Parts of the build deps (tensorflow/bazel) are
not supported on ix86.
-------------------------------------------------------------------
Fri Dec 6 13:45:57 UTC 2019 - Guillaume GARDET <guillaume.gardet@opensuse.org>
- Update to 19.11:
* Changelog: https://github.com/ARM-software/armnn/releases/tag/v19.11
- Remove upstreamed patches:
* armnn-generate-versioned-library.patch
* armnn-fix_boost.patch
* armnn-fix_arm32_dep.patch
* armnn-fix_arm32.patch
- Add patch to fix include:
* armnn-fix_include.patch
-------------------------------------------------------------------
Mon Nov 25 14:16:29 UTC 2019 - Guillaume GARDET <guillaume.gardet@opensuse.org>


@@ -1,7 +1,7 @@
#
# spec file for package armnn
# spec file
#
# Copyright (c) 2018 SUSE LINUX GmbH, Nuernberg, Germany.
# Copyright (c) 2022 SUSE LLC
#
# All modifications and additions to the file contributed by third parties
# remain the property of their copyright owners, unless otherwise agreed
@@ -12,94 +12,71 @@
# license that conforms to the Open Source Definition (Version 1.9)
# published by the Open Source Initiative.
# Please submit bugfixes or comments via http://bugs.opensuse.org/
# Please submit bugfixes or comments via https://bugs.opensuse.org/
#
%define target @BUILD_FLAVOR@%{nil}
# Disable LTO until lto link is fixed - https://github.com/ARM-software/armnn/issues/251
# Disable LTO until UnitTests passes with LTO enabled - https://github.com/ARM-software/armnn/issues/623
%define _lto_cflags %{nil}
# Disable Python binding for now
%bcond_with PyArmnn
%define target @BUILD_FLAVOR@%{nil}
%if "%{target}" != ""
%define package_suffix -%{target}
%endif
# Compute library has neon enabled for aarch64 only
%ifarch aarch64
%bcond_without compute_neon
%else
%bcond_with compute_neon
%endif
%if "%{target}" == "opencl"
%bcond_without compute_cl
%else
%bcond_with compute_cl
%endif
# stb-devel is available on Leap 15.1+
%if 0%{?suse_version} > 1500 || ( 0%{?sle_version} > 150000 && 0%{?is_opensuse} )
%bcond_without armnn_tests
%else
%bcond_with armnn_tests
%endif
# Extra tests require opencv(3)-devel, but it is broken for Leap 15.x - boo#1154091
%if 0%{?suse_version} > 1500
%bcond_without armnn_extra_tests
# Extra tests require opencv(3)-devel, but it is broken for Leap 15.1 - boo#1154091
%if 0%{?suse_version} > 1500 || 0%{?sle_version} >= 150200
# FIXME: disabled for now, as it fails since version 21.05
%bcond_with armnn_extra_tests
%else
%bcond_with armnn_extra_tests
%endif
# flatbuffers-devel is available on Leap 15.2+
%if 0%{?suse_version} > 1500 || ( 0%{?sle_version} >= 150200 && 0%{?is_opensuse} )
# flatbuffers-devel is available on Leap 15.2+/SLE15SP2+
%if 0%{?suse_version} > 1500 || 0%{?sle_version} >= 150200
%bcond_without armnn_flatbuffers
%else
%bcond_with armnn_flatbuffers
%endif
# Enable CAFFE
%bcond_without armnn_caffe
# Enable TensorFlow only on TW aarch64 and x86_64 (TF fails to build on Leap 15.x and on armv7 TW)
%if 0%{?suse_version} > 1500
%ifarch aarch64 x86_64
%bcond_without armnn_tf
%else
%bcond_with armnn_tf
%endif # ifarch
%else # suse_version
%bcond_with armnn_tf
%endif # suse_version
# ONNX is available on Tumbleweed only
%if 0%{?suse_version} > 1500
# ONNX is available on Leap 15.2+/SLE15SP2+, but there is a compatibility issue
# with ONNX 1.7.0 in Tumbleweed - https://github.com/ARM-software/armnn/issues/419
%if 0%{?sle_version} >= 150200
%bcond_without armnn_onnx
%else
%bcond_with armnn_onnx
%endif
%define version_major 19
%define version_minor 08
%define version_major 22
%define version_minor 02
%define version_lib 28
%define version_lib_tfliteparser 24
%define version_lib_onnxparser 24
Name: armnn%{?package_suffix}
Version: %{version_major}.%{version_minor}
Release: 0
Summary: Arm NN SDK enables machine learning workloads on power-efficient devices
License: MIT
Group: Development/Libraries/Other
Url: https://developer.arm.com/products/processors/machine-learning/arm-nn
URL: https://developer.arm.com/products/processors/machine-learning/arm-nn
Source0: https://github.com/ARM-software/armnn/archive/v%{version}.tar.gz#/armnn-%{version}.tar.gz
Source1: armnn-rpmlintrc
# PATCH-FIX-UPSTREAM - https://github.com/ARM-software/armnn/issues/275
Patch1: armnn-generate-versioned-library.patch
# Patch: http://arago-project.org/git/?p=meta-arago.git;a=blob;f=meta-arago-extras/recipes-support/armnn/armnn/0007-enable-use-of-arm-compute-shared-library.patch;hb=master
Patch2: 0007-enable-use-of-arm-compute-shared-library.patch
# PATCH-FIX-UPSTREAM - https://github.com/ARM-software/armnn/issues/274
Patch3: armnn-fix_boost.patch
# PATCH-FIX-UPSTREAM - https://github.com/ARM-software/armnn/issues/266
Patch4: armnn-fix_arm32_dep.patch
Patch5: armnn-fix_arm32.patch
# PATCHES to add downstream ArmnnExamples binary - https://layers.openembedded.org/layerindex/recipe/87610/
Patch200: 0003-add-more-test-command-line-arguments.patch
Patch201: 0005-add-armnn-mobilenet-test-example.patch
@@ -107,31 +84,38 @@ Patch202: 0006-armnn-mobilenet-test-example.patch
Patch203: 0009-command-line-options-for-video-port-selection.patch
Patch204: 0010-armnnexamples-update-for-19.08-modifications.patch
Patch205: armnn-fix_find_opencv.patch
BuildRequires: ComputeLibrary-devel >= %{version_major}.%{version_minor}
BuildRequires: cmake >= 3.0.2
BuildRequires: gcc-c++
BuildRequires: protobuf-devel
BuildRequires: python-rpm-macros
BuildRequires: valgrind-devel
BuildRequires: vim
# Make armnn-opencl pull lib*-opencl, and armnn pull non-opencl libs
Requires: libarmnn%{version_lib}%{?package_suffix} = %{version}
ExcludeArch: %ix86
%if 0%{?suse_version} < 1330
BuildRequires: boost-devel >= 1.59
%else
BuildRequires: libboost_filesystem-devel >= 1.59
BuildRequires: libboost_log-devel >= 1.59
BuildRequires: libboost_program_options-devel >= 1.59
BuildRequires: libboost_system-devel >= 1.59
BuildRequires: libboost_test-devel >= 1.59
%if %{with armnn_extra_tests}
BuildRequires: libboost_log-devel >= 1.59
BuildRequires: libboost_thread-devel >= 1.59
%endif
%if %{with armnn_caffe}
BuildRequires: caffe-devel
%endif
BuildRequires: cmake >= 3.0.2
BuildRequires: ComputeLibrary-devel >= 19.08
BuildRequires: gcc-c++
%if %{with armnn_flatbuffers}
BuildRequires: flatbuffers-devel
BuildRequires: tensorflow-lite-devel
BuildRequires: tensorflow2-lite-devel
%endif
%if %{with compute_cl}
# Mesa-libOpenCL is required for tests
BuildRequires: Mesa-libOpenCL
BuildRequires: ocl-icd-devel
BuildRequires: opencl-cpp-headers
BuildRequires: opencl-headers
%endif
%if %{with armnn_extra_tests}
%if 0%{?suse_version} > 1500
@@ -141,34 +125,25 @@ BuildRequires: opencv-devel
%endif
%endif
%if %{with armnn_onnx}
BuildRequires: python3-onnx-devel
BuildRequires: python3-onnx
%endif
BuildRequires: protobuf-devel
BuildRequires: python-rpm-macros
%if %{with armnn_tests}
BuildRequires: stb-devel
%endif
%if %{with armnn_tf}
BuildRequires: tensorflow-devel
%if %{with PyArmnn}
BuildRequires: python3-devel
BuildRequires: python3-wheel
BuildRequires: swig >= 4
%endif
BuildRequires: valgrind-devel
%if %{with compute_cl}
Recommends: Mesa-libOpenCL
%endif
# Make armnn-opencl pull lib*-opencl, and armnn pull non-opencl libs
Requires: libarmnn%{version_major}%{?package_suffix} = %{version}
%if %{with armnn_flatbuffers}
Requires: libarmnnSerializer%{version_major}%{?package_suffix} = %{version}
Requires: libarmnnTfLiteParser%{version_major}%{?package_suffix} = %{version}
%endif
%if %{with armnn_caffe}
Requires: libarmnnCaffeParser%{version_major}%{?package_suffix} = %{version}
Requires: libarmnnSerializer%{version_lib}%{?package_suffix} = %{version}
Requires: libarmnnTfLiteParser%{version_lib_tfliteparser}%{?package_suffix} = %{version}
%endif
%if %{with armnn_onnx}
Requires: libarmnnOnnxParser%{version_major}%{?package_suffix} = %{version}
%endif
%if %{with armnn_tf}
Requires: libarmnnTfParser%{version_major}%{?package_suffix} = %{version}
Requires: libarmnnOnnxParser%{version_lib_onnxparser}%{?package_suffix} = %{version}
%endif
# Make sure we do not install both openCL and non-openCL (CPU only) versions.
%if "%{target}" == "opencl"
@@ -176,45 +151,43 @@ Conflicts: armnn
%else
Conflicts: armnn-opencl
%endif
BuildRoot: %{_tmppath}/%{name}-%{version}-build
ExclusiveArch: aarch64 armv7l armv7hl x86_64
%description
Arm NN is an inference engine for CPUs, GPUs and NPUs.
It bridges the gap between existing NN frameworks and the underlying IP.
It enables efficient translation of existing neural network frameworks,
such as TensorFlow and Caffe, allowing them to run efficiently without
Arm NN is an inference engine for CPUs, GPUs and NPUs.
It bridges the gap between existing NN frameworks and the underlying IP.
It enables efficient translation of existing neural network frameworks,
such as TensorFlow Lite, allowing them to run efficiently without
modification across Arm Cortex CPUs and Arm Mali GPUs.
%package devel
Summary: Development headers and libraries for armnn
# Make sure we do not install both openCL and non-openCL (CPU only) versions.
Group: Development/Libraries/C and C++
Requires: %{name} = %{version}
Requires: libarmnn%{version_lib}%{?package_suffix} = %{version}
Requires: libarmnnBasePipeServer%{version_lib}%{?package_suffix} = %{version}
Requires: libtimelineDecoder%{version_lib}%{?package_suffix} = %{version}
Requires: libtimelineDecoderJson%{version_lib}%{?package_suffix} = %{version}
# Make sure we do not install both openCL and non-openCL (CPU only) versions.
%if "%{target}" == "opencl"
Conflicts: armnn-devel
%else
Conflicts: armnn-opencl-devel
%endif
Requires: %{name} = %{version}
Requires: libarmnn%{version_major}%{?package_suffix} = %{version}
%if %{with armnn_flatbuffers}
Requires: libarmnnSerializer%{version_major}%{?package_suffix} = %{version}
Requires: libarmnnTfLiteParser%{version_major}%{?package_suffix} = %{version}
%endif
%if %{with armnn_caffe}
Requires: libarmnnCaffeParser%{version_major}%{?package_suffix} = %{version}
Requires: libarmnnSerializer%{version_lib}%{?package_suffix} = %{version}
Requires: libarmnnTfLiteParser%{version_lib_tfliteparser}%{?package_suffix} = %{version}
%endif
%if %{with armnn_onnx}
Requires: libarmnnOnnxParser%{version_major}%{?package_suffix} = %{version}
%endif
%if %{with armnn_tf}
Requires: libarmnnTfParser%{version_major}%{?package_suffix} = %{version}
Requires: libarmnnOnnxParser%{version_lib_onnxparser}%{?package_suffix} = %{version}
%endif
%description devel
Arm NN is an inference engine for CPUs, GPUs and NPUs.
It bridges the gap between existing NN frameworks and the underlying IP.
It enables efficient translation of existing neural network frameworks,
such as TensorFlow and Caffe, allowing them to run efficiently without
Arm NN is an inference engine for CPUs, GPUs and NPUs.
It bridges the gap between existing NN frameworks and the underlying IP.
It enables efficient translation of existing neural network frameworks,
such as TensorFlow Lite, allowing them to run efficiently without
modification across Arm Cortex CPUs and Arm Mali GPUs.
This package contains the development libraries and headers for armnn.
@@ -222,136 +195,151 @@ This package contains the development libraries and headers for armnn.
%if %{with armnn_extra_tests}
%package -n %{name}-extratests
Summary: Additional downstream tests for Arm NN
# Make sure we do not install both openCL and non-openCL (CPU only) versions.
Group: Development/Libraries/C and C++
Requires: %{name}
# Make sure we do not install both openCL and non-openCL (CPU only) versions.
%if "%{target}" == "opencl"
Conflicts: armnn-extratests
%else
Conflicts: armnn-opencl-extratests
%endif
Requires: %{name}
%description -n %{name}-extratests
Arm NN is an inference engine for CPUs, GPUs and NPUs.
It bridges the gap between existing NN frameworks and the underlying IP.
It enables efficient translation of existing neural network frameworks,
such as TensorFlow and Caffe, allowing them to run efficiently without
Arm NN is an inference engine for CPUs, GPUs and NPUs.
It bridges the gap between existing NN frameworks and the underlying IP.
It enables efficient translation of existing neural network frameworks,
such as TensorFlow Lite, allowing them to run efficiently without
modification across Arm Cortex CPUs and Arm Mali GPUs.
This package contains additional downstream tests for armnn.
%endif
%package -n libarmnn%{version_major}%{?package_suffix}
%if "%{target}" == "opencl"
Conflicts: libarmnn%{version_major}
%else
Conflicts: libarmnn%{version_major}-opencl
%endif
%package -n libarmnn%{version_lib}%{?package_suffix}
Summary: libarmnn from armnn
Group: Development/Libraries/C and C++
%if "%{target}" == "opencl"
Conflicts: libarmnn%{version_lib}
%else
Conflicts: libarmnn%{version_lib}-opencl
%endif
%description -n libarmnn%{version_major}%{?package_suffix}
Arm NN is an inference engine for CPUs, GPUs and NPUs.
It bridges the gap between existing NN frameworks and the underlying IP.
It enables efficient translation of existing neural network frameworks,
such as TensorFlow and Caffe, allowing them to run efficiently without
%description -n libarmnn%{version_lib}%{?package_suffix}
Arm NN is an inference engine for CPUs, GPUs and NPUs.
It bridges the gap between existing NN frameworks and the underlying IP.
It enables efficient translation of existing neural network frameworks,
such as TensorFlow Lite, allowing them to run efficiently without
modification across Arm Cortex CPUs and Arm Mali GPUs.
This package contains the libarmnn library from armnn.
%if %{with armnn_flatbuffers}
%package -n libarmnnSerializer%{version_major}%{?package_suffix}
%package -n libarmnnBasePipeServer%{version_lib}%{?package_suffix}
Summary: libarmnnBasePipeServer from armnn
Group: Development/Libraries/C and C++
%if "%{target}" == "opencl"
Conflicts: libarmnnSerializer%{version_major}
Conflicts: libarmnnBasePipeServer%{version_lib}
%else
Conflicts: libarmnnSerializer%{version_major}-opencl
Conflicts: libarmnnBasePipeServer%{version_lib}-opencl
%endif
%description -n libarmnnBasePipeServer%{version_lib}%{?package_suffix}
Arm NN is an inference engine for CPUs, GPUs and NPUs.
It bridges the gap between existing NN frameworks and the underlying IP.
It enables efficient translation of existing neural network frameworks,
such as TensorFlow Lite, allowing them to run efficiently without
modification across Arm Cortex CPUs and Arm Mali GPUs.
This package contains the libarmnnBasePipeServer library from armnn.
%package -n libtimelineDecoder%{version_lib}%{?package_suffix}
Summary: libtimelineDecoder from armnn
Group: Development/Libraries/C and C++
%if "%{target}" == "opencl"
Conflicts: libtimelineDecoder%{version_lib}
%else
Conflicts: libtimelineDecoder%{version_lib}-opencl
%endif
%description -n libtimelineDecoder%{version_lib}%{?package_suffix}
Arm NN is an inference engine for CPUs, GPUs and NPUs.
It bridges the gap between existing NN frameworks and the underlying IP.
It enables efficient translation of existing neural network frameworks,
such as TensorFlow Lite, allowing them to run efficiently without
modification across Arm Cortex CPUs and Arm Mali GPUs.
This package contains the libtimelineDecoder library from armnn.
%package -n libtimelineDecoderJson%{version_lib}%{?package_suffix}
Summary: libtimelineDecoderJson from armnn
Group: Development/Libraries/C and C++
%if "%{target}" == "opencl"
Conflicts: libtimelineDecoderJson%{version_lib}
%else
Conflicts: libtimelineDecoderJson%{version_lib}-opencl
%endif
%description -n libtimelineDecoderJson%{version_lib}%{?package_suffix}
Arm NN is an inference engine for CPUs, GPUs and NPUs.
It bridges the gap between existing NN frameworks and the underlying IP.
It enables efficient translation of existing neural network frameworks,
such as TensorFlow Lite, allowing them to run efficiently without
modification across Arm Cortex CPUs and Arm Mali GPUs.
This package contains the libtimelineDecoderJson library from armnn.
%if %{with armnn_flatbuffers}
%package -n libarmnnSerializer%{version_lib}%{?package_suffix}
Summary: libarmnnSerializer from armnn
Group: Development/Libraries/C and C++
%if "%{target}" == "opencl"
Conflicts: libarmnnSerializer%{version_lib}
%else
Conflicts: libarmnnSerializer%{version_lib}-opencl
%endif
%description -n libarmnnSerializer%{version_major}%{?package_suffix}
Arm NN is an inference engine for CPUs, GPUs and NPUs.
It bridges the gap between existing NN frameworks and the underlying IP.
It enables efficient translation of existing neural network frameworks,
such as TensorFlow and Caffe, allowing them to run efficiently without
%description -n libarmnnSerializer%{version_lib}%{?package_suffix}
Arm NN is an inference engine for CPUs, GPUs and NPUs.
It bridges the gap between existing NN frameworks and the underlying IP.
It enables efficient translation of existing neural network frameworks,
such as TensorFlow Lite, allowing them to run efficiently without
modification across Arm Cortex CPUs and Arm Mali GPUs.
This package contains the libarmnnSerializer library from armnn.
%package -n libarmnnTfLiteParser%{version_major}%{?package_suffix}
%if "%{target}" == "opencl"
Conflicts: libarmnnTfLiteParser%{version_major}
%else
Conflicts: libarmnnTfLiteParser%{version_major}-opencl
%endif
%package -n libarmnnTfLiteParser%{version_lib_tfliteparser}%{?package_suffix}
Summary: libarmnnTfLiteParser from armnn
Group: Development/Libraries/C and C++
%if "%{target}" == "opencl"
Conflicts: libarmnnTfLiteParser%{version_lib_tfliteparser}
%else
Conflicts: libarmnnTfLiteParser%{version_lib_tfliteparser}-opencl
%endif
%description -n libarmnnTfLiteParser%{version_major}%{?package_suffix}
Arm NN is an inference engine for CPUs, GPUs and NPUs.
It bridges the gap between existing NN frameworks and the underlying IP.
It enables efficient translation of existing neural network frameworks,
such as TensorFlow and Caffe, allowing them to run efficiently without
%description -n libarmnnTfLiteParser%{version_lib_tfliteparser}%{?package_suffix}
Arm NN is an inference engine for CPUs, GPUs and NPUs.
It bridges the gap between existing NN frameworks and the underlying IP.
It enables efficient translation of existing neural network frameworks,
such as TensorFlow Lite, allowing them to run efficiently without
modification across Arm Cortex CPUs and Arm Mali GPUs.
This package contains the libarmnnTfLiteParser library from armnn.
%endif
%if %{with armnn_tf}
%package -n libarmnnTfParser%{version_major}%{?package_suffix}
%if "%{target}" == "opencl"
Conflicts: libarmnnTfParser%{version_major}
%else
Conflicts: libarmnnTfParser%{version_major}-opencl
%endif
Summary: libarmnnTfParser from armnn
Group: Development/Libraries/C and C++
%description -n libarmnnTfParser%{version_major}%{?package_suffix}
Arm NN is an inference engine for CPUs, GPUs and NPUs.
It bridges the gap between existing NN frameworks and the underlying IP.
It enables efficient translation of existing neural network frameworks,
such as TensorFlow and Caffe, allowing them to run efficiently without
modification across Arm Cortex CPUs and Arm Mali GPUs.
This package contains the libarmnnTfParser library from armnn.
%endif
%if %{with armnn_caffe}
%package -n libarmnnCaffeParser%{version_major}%{?package_suffix}
%if "%{target}" == "opencl"
Conflicts: libarmnnCaffeParser%{version_major}
%else
Conflicts: libarmnnCaffeParser%{version_major}-opencl
%endif
Summary: libarmnnCaffeParser from armnn
Group: Development/Libraries/C and C++
%description -n libarmnnCaffeParser%{version_major}%{?package_suffix}
Arm NN is an inference engine for CPUs, GPUs and NPUs.
It bridges the gap between existing NN frameworks and the underlying IP.
It enables efficient translation of existing neural network frameworks,
such as TensorFlow and Caffe, allowing them to run efficiently without
modification across Arm Cortex CPUs and Arm Mali GPUs.
This package contains the libarmnnCaffeParser library from armnn.
%endif
%if %{with armnn_onnx}
%package -n libarmnnOnnxParser%{version_major}%{?package_suffix}
%if "%{target}" == "opencl"
Conflicts: libarmnnOnnxParser%{version_major}
%else
Conflicts: libarmnnOnnxParser%{version_major}-opencl
%endif
%package -n libarmnnOnnxParser%{version_lib_onnxparser}%{?package_suffix}
Summary: libarmnnOnnxParser from armnn
Group: Development/Libraries/C and C++
%if "%{target}" == "opencl"
Conflicts: libarmnnOnnxParser%{version_lib_onnxparser}
%else
Conflicts: libarmnnOnnxParser%{version_lib_onnxparser}-opencl
%endif
%description -n libarmnnOnnxParser%{version_major}%{?package_suffix}
Arm NN is an inference engine for CPUs, GPUs and NPUs.
It bridges the gap between existing NN frameworks and the underlying IP.
It enables efficient translation of existing neural network frameworks,
such as TensorFlow and Caffe, allowing them to run efficiently without
%description -n libarmnnOnnxParser%{version_lib_onnxparser}%{?package_suffix}
Arm NN is an inference engine for CPUs, GPUs and NPUs.
It bridges the gap between existing NN frameworks and the underlying IP.
It enables efficient translation of existing neural network frameworks,
such as TensorFlow Lite, allowing them to run efficiently without
modification across Arm Cortex CPUs and Arm Mali GPUs.
This package contains the libarmnnOnnxParser library from armnn.
@@ -359,23 +347,16 @@ This package contains the libarmnnOnnxParser library from armnn.
%prep
%setup -q -n armnn-%{version}
%patch1 -p1
%patch2 -p1
%patch3 -p1
%patch4 -p1
%patch5 -p1
%if %{with armnn_extra_tests}
%patch200 -p1
%patch201 -p1
%patch202 -p1
%patch203 -p1
%patch204 -p1
%patch205 -p1
# Boost fixes for dynamic linking
sed -i 's/add_definitions("-DBOOST_ALL_NO_LIB")/add_definitions("-DBOOST_ALL_DYN_LINK")/' ./cmake/GlobalConfig.cmake
sed -i 's/set(Boost_USE_STATIC_LIBS ON)/set(Boost_USE_STATIC_LIBS OFF)/' ./cmake/GlobalConfig.cmake
sed -i 's/find_package(Boost 1.59 REQUIRED COMPONENTS unit_test_framework system filesystem log program_options)/find_package(Boost 1.59 REQUIRED COMPONENTS unit_test_framework system filesystem log thread program_options)/' ./cmake/GlobalConfig.cmake
# Build fix
sed -i 's/-Wsign-conversion//' ./cmake/GlobalConfig.cmake
# Add Boost log, as the downstream extra tests require it
sed -i 's/ find_package(Boost 1.59 REQUIRED COMPONENTS unit_test_framework)/find_package(Boost 1.59 REQUIRED COMPONENTS unit_test_framework filesystem system log program_options)/' ./cmake/GlobalConfig.cmake
%endif
%build
%if %{with armnn_onnx}
@@ -384,14 +365,10 @@ PROTO=$(find %{_libdir} -name onnx.proto)
protoc $PROTO --proto_path=. --proto_path=%{_includedir} --proto_path=$(dirname $(find %{_libdir} -name onnx)) --cpp_out=./onnx_deps
%endif
%cmake \
-DCMAKE_CXX_FLAGS:STRING="%{optflags} -pthread" \
-DCMAKE_SKIP_RPATH=True \
-DSHARED_BOOST=1 \
-DCMAKE_CXX_FLAGS:STRING="%{optflags} -pthread " \
-DBOOST_LIBRARYDIR=%{_libdir} \
%if %{with armnn_caffe}
-DBUILD_CAFFE_PARSER=ON \
%else
-DBUILD_CAFFE_PARSER=OFF \
%endif
-DCAFFE_GENERATED_SOURCES=%{_includedir}/ \
%if %{with armnn_onnx}
-DBUILD_ONNX_PARSER=ON \
-DONNX_GENERATED_SOURCES=../onnx_deps/ \
@@ -402,25 +379,18 @@ protoc $PROTO --proto_path=. --proto_path=%{_includedir} --proto_path=$(dirname
-DBUILD_ARMNN_SERIALIZER=ON \
-DFLATC_DIR=%{_bindir} \
-DFLATBUFFERS_INCLUDE_PATH=%{_includedir} \
-DBUILD_ARMNN_QUANTIZER=ON \
-DBUILD_TF_LITE_PARSER=ON \
-DTfLite_Schema_INCLUDE_PATH=%{_includedir}/tensorflow/lite/schema/ \
-DTF_LITE_SCHEMA_INCLUDE_PATH=%{_includedir}/tensorflow/lite/schema/ \
%else
-DBUILD_ARMNN_SERIALIZER=OFF \
-DBUILD_ARMNN_QUANTIZER=OFF \
-DBUILD_TF_LITE_PARSER=OFF \
%endif
%if %{with armnn_tf}
-DBUILD_TF_PARSER=ON \
-DTF_GENERATED_SOURCES=%{python3_sitelib}/tensorflow/include/ \
%else
-DBUILD_TF_PARSER=OFF \
%endif
%if %{with compute_neon} || %{with compute_cl}
-DARMCOMPUTE_INCLUDE=%{_includedir} \
-DHALF_INCLUDE=%{_includedir}/half \
-DARMCOMPUTE_BUILD_DIR=%{_libdir} \
-DARMCOMPUTE_ROOT=%{_prefix} \
%endif
%if %{with compute_neon}
-DARMCOMPUTENEON=ON \
-DBUILD_UNIT_TESTS=OFF \
-DBUILD_TESTS=OFF \
%endif
%if %{with PyArmnn}
-DBUILD_PYTHON_WHL=ON \
-DBUILD_PYTHON_SRC=ON \
%else
-DBUILD_PYTHON_WHL=OFF \
-DBUILD_PYTHON_SRC=OFF \
%endif
%if %{with armnn_extra_tests}
-DBUILD_ARMNN_EXAMPLES=ON
%else
-DBUILD_ARMNN_EXAMPLES=OFF
%endif
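# A quick sanity check that the conditionals above selected the expected
# features is to inspect the configured cache, e.g.:
#   grep -E 'BUILD_(ONNX|TF_LITE)_PARSER|ARMCOMPUTENEON' build/CMakeCache.txt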
%if 0%{?suse_version} > 1500
%cmake_build
%else
%make_jobs
%endif
%if %{with armnn_tests}
pushd tests/
%if 0%{?suse_version} > 1500
%cmake_build
%else
%make_jobs
find ./build/tests -maxdepth 1 -type f -executable -exec cp $CP_ARGS {} %{buildroot}%{_bindir} \;
# Install Sample app
cp $CP_ARGS ./build/samples/SimpleSample %{buildroot}%{_bindir}
%endif
# Drop static libs - https://github.com/ARM-software/armnn/issues/514
rm -f %{buildroot}%{_libdir}/*.a
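# (A stricter variant could fail the build if any stray archive remains,
# e.g.: test -z "$(find %{buildroot}%{_libdir} -name '*.a')")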
# OpenCL UnitTests fail in OBS because the workers lack an OpenCL device
%if %{without compute_cl} && %{with armnn_tests}
%check
# Run tests
LD_LIBRARY_PATH="$(pwd)/build/" \
./build/UnitTests $UnitTestFlags
%endif
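# UnitTestFlags is assumed to be defined earlier in the spec (empty by
# default); with the doctest-based UnitTests runner it could carry a filter
# such as the following (pattern illustrative, not taken from this spec):
#   UnitTestFlags="--test-case-exclude=*OpenCl*"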
%post -n libarmnn%{version_lib}%{?package_suffix} -p /sbin/ldconfig
%postun -n libarmnn%{version_lib}%{?package_suffix} -p /sbin/ldconfig
%post -n libarmnnBasePipeServer%{version_lib}%{?package_suffix} -p /sbin/ldconfig
%postun -n libarmnnBasePipeServer%{version_lib}%{?package_suffix} -p /sbin/ldconfig
%post -n libtimelineDecoderJson%{version_lib}%{?package_suffix} -p /sbin/ldconfig
%postun -n libtimelineDecoderJson%{version_lib}%{?package_suffix} -p /sbin/ldconfig
%post -n libtimelineDecoder%{version_lib}%{?package_suffix} -p /sbin/ldconfig
%postun -n libtimelineDecoder%{version_lib}%{?package_suffix} -p /sbin/ldconfig
%if %{with armnn_flatbuffers}
%post -n libarmnnSerializer%{version_lib}%{?package_suffix} -p /sbin/ldconfig
%postun -n libarmnnSerializer%{version_lib}%{?package_suffix} -p /sbin/ldconfig
%post -n libarmnnTfLiteParser%{version_lib_tfliteparser}%{?package_suffix} -p /sbin/ldconfig
%postun -n libarmnnTfLiteParser%{version_lib_tfliteparser}%{?package_suffix} -p /sbin/ldconfig
%endif
%if %{with armnn_onnx}
%post -n libarmnnOnnxParser%{version_lib_onnxparser}%{?package_suffix} -p /sbin/ldconfig
%postun -n libarmnnOnnxParser%{version_lib_onnxparser}%{?package_suffix} -p /sbin/ldconfig
%endif
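# The "-p /sbin/ldconfig" form makes rpm exec ldconfig directly as the whole
# scriptlet, without spawning a shell; it is equivalent in effect to writing
# (package name illustrative):
#   %%post -n libarmnn22
#   /sbin/ldconfig
# so the runtime linker cache is refreshed as soon as the library lands.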
%files
%defattr(-,root,root)
%doc README.md
%license LICENSE
%if %{with armnn_tests}
%{_bindir}/ExecuteNetwork
%if %{with armnn_flatbuffers}
%{_bindir}/ArmnnConverter
%{_bindir}/TfLite*-Armnn
%{_bindir}/Image*Generator
%endif
%if %{with armnn_onnx}
%{_bindir}/Onnx*-Armnn
%endif
%if %{with armnn_flatbuffers}
%{_bindir}/SimpleSample
%endif
%{_bindir}/ArmnnExamples
%endif
%files -n libarmnn%{version_lib}%{?package_suffix}
%{_libdir}/libarmnn.so.*
%files -n libarmnnBasePipeServer%{version_lib}%{?package_suffix}
%{_libdir}/libarmnnBasePipeServer.so.*
%files -n libtimelineDecoder%{version_lib}%{?package_suffix}
%{_libdir}/libtimelineDecoder.so.*
%files -n libtimelineDecoderJson%{version_lib}%{?package_suffix}
%{_libdir}/libtimelineDecoderJson.so.*
%if %{with armnn_flatbuffers}
%files -n libarmnnSerializer%{version_lib}%{?package_suffix}
%{_libdir}/libarmnnSerializer.so.*
%files -n libarmnnTfLiteParser%{version_lib_tfliteparser}%{?package_suffix}
%{_libdir}/libarmnnTfLiteParser.so.*
%endif
%if %{with armnn_onnx}
%files -n libarmnnOnnxParser%{version_lib_onnxparser}%{?package_suffix}
%{_libdir}/libarmnnOnnxParser.so.*
%endif
%defattr(-,root,root)
%dir %{_includedir}/armnn/
%{_includedir}/armnn/*.hpp
%dir %{_includedir}/armnn/backends
%{_includedir}/armnn/backends/CMakeLists.txt
%{_includedir}/armnn/backends/*.hpp
%dir %{_includedir}/armnn/backends/profiling
%{_includedir}/armnn/backends/profiling/*.hpp
%dir %{_includedir}/armnn/profiling
%{_includedir}/armnn/profiling/*.hpp
%dir %{_includedir}/armnn/utility
%{_includedir}/armnn/utility/*.hpp
%dir %{_includedir}/armnnUtils
%{_includedir}/armnnUtils/*.hpp
%dir %{_includedir}/armnnOnnxParser/
%{_includedir}/armnnOnnxParser/*.hpp
%dir %{_includedir}/armnnTfLiteParser/
%{_includedir}/armnnTfLiteParser/*.hpp
%dir %{_includedir}/armnnDeserializer/
%{_includedir}/armnnDeserializer/IDeserializer.hpp
%dir %{_includedir}/armnnSerializer/
%{_includedir}/armnnSerializer/ISerializer.hpp
%dir %{_includedir}/armnnTestUtils/
%{_includedir}/armnnTestUtils/*.hpp
%dir %{_libdir}/cmake/armnn
%{_libdir}/cmake/armnn/*
%{_libdir}/libarmnn.so
%{_libdir}/libarmnnBasePipeServer.so
%{_libdir}/libtimelineDecoder.so
%{_libdir}/libtimelineDecoderJson.so
%if %{with armnn_flatbuffers}
%{_libdir}/libarmnnSerializer.so
%{_libdir}/libarmnnTfLiteParser.so
%endif
%{_libdir}/libarmnnTestUtils.so
%if %{with armnn_onnx}
%{_libdir}/libarmnnOnnxParser.so
%endif
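# With the cmake config files packaged above, a downstream CMake project can
# consume Arm NN the usual way; a minimal sketch (the exported target name is
# an assumption, check the installed ArmnnConfig.cmake for the exact one):
#   find_package(armnn REQUIRED)
#   target_link_libraries(myapp PRIVATE Armnn::Armnn)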
%changelog