From a54685d0bf10b59d2de9116173661453b6a0345688513e3d7c231c9549536a4f Mon Sep 17 00:00:00 2001 From: Guillaume GARDET Date: Fri, 6 Sep 2024 13:30:37 +0000 Subject: [PATCH] - Add patch to fix build on armv7: * armnn-fix-armv7.patch OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=92 --- .gitattributes | 23 + .gitignore | 1 + ...add-more-test-command-line-arguments.patch | 76 ++ 0005-add-armnn-mobilenet-test-example.patch | 71 ++ 0006-armnn-mobilenet-test-example.patch | 680 ++++++++++++++++++ ...ine-options-for-video-port-selection.patch | 60 ++ ...mples-update-for-19.08-modifications.patch | 28 + _constraints | 10 + _multibuild | 3 + armnn-24.05.tar.gz | 3 + armnn-24.08.tar.gz | 3 + armnn-fix-armv7.patch | 18 + armnn-fix_find_opencv.patch | 19 + armnn-rpmlintrc | 4 + armnn.changes | 597 +++++++++++++++ armnn.spec | 623 ++++++++++++++++ 16 files changed, 2219 insertions(+) create mode 100644 .gitattributes create mode 100644 .gitignore create mode 100644 0003-add-more-test-command-line-arguments.patch create mode 100644 0005-add-armnn-mobilenet-test-example.patch create mode 100644 0006-armnn-mobilenet-test-example.patch create mode 100644 0009-command-line-options-for-video-port-selection.patch create mode 100644 0010-armnnexamples-update-for-19.08-modifications.patch create mode 100644 _constraints create mode 100644 _multibuild create mode 100644 armnn-24.05.tar.gz create mode 100644 armnn-24.08.tar.gz create mode 100644 armnn-fix-armv7.patch create mode 100644 armnn-fix_find_opencv.patch create mode 100644 armnn-rpmlintrc create mode 100644 armnn.changes create mode 100644 armnn.spec diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..9b03811 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,23 @@ +## Default LFS +*.7z filter=lfs diff=lfs merge=lfs -text +*.bsp filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.gem filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.jar filter=lfs diff=lfs merge=lfs -text +*.lz filter=lfs diff=lfs merge=lfs -text +*.lzma filter=lfs diff=lfs merge=lfs -text +*.obscpio filter=lfs diff=lfs merge=lfs -text +*.oxt filter=lfs diff=lfs merge=lfs -text +*.pdf filter=lfs diff=lfs merge=lfs -text +*.png filter=lfs diff=lfs merge=lfs -text +*.rpm filter=lfs diff=lfs merge=lfs -text +*.tbz filter=lfs diff=lfs merge=lfs -text +*.tbz2 filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.ttf filter=lfs diff=lfs merge=lfs -text +*.txz filter=lfs diff=lfs merge=lfs -text +*.whl filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..57affb6 --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +.osc diff --git a/0003-add-more-test-command-line-arguments.patch b/0003-add-more-test-command-line-arguments.patch new file mode 100644 index 0000000..06d4354 --- /dev/null +++ b/0003-add-more-test-command-line-arguments.patch @@ -0,0 +1,76 @@ +From 964cb82f3b811aec6663255ab0aa589f0a3be0ee Mon Sep 17 00:00:00 2001 +From: Qin Su +Date: Fri, 22 Feb 2019 14:10:07 -0500 +Subject: [PATCH] add more test command line arguments + +Updated by Guillaume_G to apply properly (s/BOOST_ASSERT/ARMNN_ASSERT/) + +Upstream-Status: Inappropriate [TI only test code] +Signed-off-by: Qin Su +--- + tests/InferenceTest.inl | 49 +++++++++++++++++++++++++++++++++++++++++++++++++ + 
1 file changed, 49 insertions(+) + +diff --git a/tests/InferenceTest.inl b/tests/InferenceTest.inl +index 538720b..6fd21b8 100644 +--- a/tests/InferenceTest.inl ++++ b/tests/InferenceTest.inl +@@ -326,6 +326,55 @@ int ClassifierInferenceTestMain(int argc, + ARMNN_ASSERT(modelFilename); + ARMNN_ASSERT(inputBindingName); + ARMNN_ASSERT(outputBindingName); ++ int count; ++ const char *p_input; ++ char inmodelname[500]; ++ char outtensorname[500]; ++ ++ /* parse command line */ ++ for (count = 1; count < argc; count++) ++ { ++ if (*(argv[count]) == '+') ++ { ++ p_input = argv[count] + 1; ++ switch (*(p_input)) ++ { ++ case 'i': ++ case 'I': ++ strcpy(inmodelname, p_input + 2); ++ modelFilename = &inmodelname[0]; ++ std::cout << "Input model = " << modelFilename << std::endl; ++ break; ++ case 'o': ++ case 'O': ++ strcpy(outtensorname, p_input + 2); ++ outputBindingName = &outtensorname[0]; ++ std::cout << "out tensor name = " << outputBindingName << std::endl; ++ break; ++ default: ++ break; ++ } ++ } ++ else if (*(argv[count]) == '-') ++ { ++ p_input = argv[count] + 1; ++ switch (*(p_input)) ++ { ++ case '-': ++ p_input = argv[count] + 2; ++ case 'h': ++ case 'H': ++ std::cout <<"\nAdditional Options: " << std::endl; ++ std::cout <<" +i Set user specified inference model name." << std::endl; ++ std::cout <<" If not set, default name is used." << std::endl; ++ std::cout <<" +o Set user specified output tensor name." << std::endl; ++ std::cout <<" If not set, default name is used.\n" << std::endl; ++ break; ++ default: ++ break; ++ } ++ } ++ } + + return InferenceTestMain(argc, argv, defaultTestCaseIds, + [=] +-- +1.9.1 + diff --git a/0005-add-armnn-mobilenet-test-example.patch b/0005-add-armnn-mobilenet-test-example.patch new file mode 100644 index 0000000..987635c --- /dev/null +++ b/0005-add-armnn-mobilenet-test-example.patch @@ -0,0 +1,71 @@ +From 99a6c339f1828d3cd1b193cf702bada9011d900b Mon Sep 17 00:00:00 2001 +From: Djordje Senicic +Date: Mon, 24 Jun 2019 14:29:19 -0400 +Subject: [PATCH] add armnn mobilenet test example + +Upstream-Status: Inappropriate [TI only test code] +Signed-off-by: Qin Su +Signed-off-by: Djordje Senicic +[Guillaume's update: Add boost_log dep] +[Guillaume's update: Update to apply on top of 20.08] +--- + tests/CMakeLists.txt | 41 +++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 41 insertions(+) + +diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt +index dfcf4b48..5a78d3a6 100644 +--- a/tests/CMakeLists.txt ++++ b/tests/CMakeLists.txt +@@ -1,3 +1,6 @@ ++find_package( OpenCV REQUIRED ) ++include_directories( ${OpenCV_INCLUDE_DIRS} ) ++ + # UnitTests + include(CheckIncludeFiles) + +@@ -348,3 +351,42 @@ if(BUILD_ARMNN_QUANTIZER) + target_include_directories(ImageCSVFileGenerator PRIVATE ../src/armnnUtils) + ImageTensorExecutor(ImageCSVFileGenerator) + endif() ++ ++if (BUILD_ARMNN_EXAMPLES) ++ set(ArmnnExamples_sources ++ ArmnnExamples/ArmnnExamples.cpp) ++ ++ add_executable_ex(ArmnnExamples ${ArmnnExamples_sources}) ++ ++ target_include_directories(ArmnnExamples PRIVATE ../src/armnnUtils) ++ target_include_directories(ArmnnExamples PRIVATE ../src/armnn) ++ target_include_directories(ArmnnExamples PRIVATE ../src/backends) ++ ++ if (BUILD_CAFFE_PARSER) ++ target_link_libraries(ArmnnExamples armnnCaffeParser) ++ endif() ++ if (BUILD_TF_PARSER) ++ target_link_libraries(ArmnnExamples armnnTfParser) ++ endif() ++ ++ if (BUILD_TF_LITE_PARSER) ++ target_link_libraries(ArmnnExamples armnnTfLiteParser) ++ endif() ++ if (BUILD_ONNX_PARSER) ++ 
target_link_libraries(ArmnnExamples armnnOnnxParser) ++ endif() ++ ++ target_link_libraries(ArmnnExamples armnn) ++ target_link_libraries(ArmnnExamples ${CMAKE_THREAD_LIBS_INIT}) ++ if(OPENCL_LIBRARIES) ++ target_link_libraries(ArmnnExamples ${OPENCL_LIBRARIES}) ++ endif() ++ ++ target_link_libraries(ArmnnExamples ++ ${Boost_LOG_LIBRARY} ++ ${Boost_SYSTEM_LIBRARY} ++ ${Boost_FILESYSTEM_LIBRARY} ++ ${Boost_PROGRAM_OPTIONS_LIBRARY} ++ ${OpenCV_LIBS}) ++ addDllCopyCommands(ArmnnExamples) ++endif() +-- +2.17.1 + diff --git a/0006-armnn-mobilenet-test-example.patch b/0006-armnn-mobilenet-test-example.patch new file mode 100644 index 0000000..fd2e210 --- /dev/null +++ b/0006-armnn-mobilenet-test-example.patch @@ -0,0 +1,680 @@ +From 4d5e7db268a4f816e24449e8ad011e35890f0c7e Mon Sep 17 00:00:00 2001 +From: Qin Su +Date: Fri, 22 Feb 2019 13:39:09 -0500 +Subject: [PATCH] armnn mobilenet test example + +Upstream-Status: Inappropriate [TI only test code] +Signed-off-by: Qin Su + +[Guillaume's update: s#Logging.hpp#armnn/Logging.hpp#] +[Guillaume's update: Add #include ] +[Guillaume's update: Drop armnnUtils::ConfigureLogging(...)] +[Guillaume's update: Handle boost::variant to mapbox::util::variant update] +--- + tests/ArmnnExamples/ArmnnExamples.cpp | 654 ++++++++++++++++++++++++++++++++++ + 1 file changed, 654 insertions(+) + create mode 100644 tests/ArmnnExamples/ArmnnExamples.cpp + +diff --git a/tests/ArmnnExamples/ArmnnExamples.cpp b/tests/ArmnnExamples/ArmnnExamples.cpp +new file mode 100644 +index 0000000..53a11cc +--- /dev/null ++++ b/tests/ArmnnExamples/ArmnnExamples.cpp +@@ -0,0 +1,654 @@ ++/****************************************************************************** ++ * Copyright (c) 2018, Texas Instruments Incorporated - http://www.ti.com/ ++ * All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the name of Texas Instruments Incorporated nor the ++ * names of its contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF ++ * THE POSSIBILITY OF SUCH DAMAGE. ++ *****************************************************************************/// ++// Copyright © 2017 Arm Ltd. All rights reserved. ++// See LICENSE file in the project root for full license information. 
++// ++#include ++#include ++ ++#include ++#include ++ ++#if defined(ARMNN_CAFFE_PARSER) ++#include "armnnCaffeParser/ICaffeParser.hpp" ++#endif ++#if defined(ARMNN_TF_PARSER) ++#include "armnnTfParser/ITfParser.hpp" ++#endif ++#if defined(ARMNN_TF_LITE_PARSER) ++#include "armnnTfLiteParser/ITfLiteParser.hpp" ++#endif ++#if defined(ARMNN_ONNX_PARSER) ++#include "armnnOnnxParser/IOnnxParser.hpp" ++#endif ++#include /*#include "CsvReader.hpp"*/ ++#include "../InferenceTest.hpp" ++#include ++#include ++ ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include "opencv2/core.hpp" ++#include "opencv2/imgproc.hpp" ++#include "opencv2/highgui.hpp" ++#include "opencv2/videoio.hpp" ++#include ++ ++using namespace cv; ++ ++#define INPUT_IMAGE 0 ++#define INPUT_VIDEO 1 ++#define INPUT_CAMERA 2 ++ ++Mat test_image; ++Rect rectCrop; ++ ++time_point predictStart; ++time_point predictEnd; ++ ++void imagenetCallBackFunc(int event, int x, int y, int flags, void* userdata) ++{ ++ if ( event == EVENT_RBUTTONDOWN ) ++ { ++ std::cout << "Right button of the mouse is clicked - position (" << x << ", " << y << ")" << " ... prepare to exit!" << std::endl; ++ exit(0); ++ } ++} ++ ++inline float Lerpfloat(float a, float b, float w) ++{ ++ return w * b + (1.f - w) * a; ++} ++ ++// Load a single image ++struct ImageData ++{ ++ unsigned int m_width; ++ unsigned int m_height; ++ unsigned int m_chnum; ++ unsigned int m_size; ++ std::vector m_image; ++}; ++// Load a single image ++std::unique_ptr loadImageData(std::string image_path, VideoCapture &cap, cv::Mat img, int input_type) ++{ ++ //cv::Mat img; ++ if (input_type == INPUT_IMAGE) ++ { ++ /* use OpenCV to get the image */ ++ img = cv::imread(image_path, CV_LOAD_IMAGE_COLOR); ++ } ++ cv::cvtColor(img, img, CV_BGR2RGB); //convert image format from BGR(openCV format) to RGB (armnn required format). ++ ++ // store image and label in output Image ++ std::unique_ptr ret(new ImageData); ++ ret->m_width = static_cast(img.cols); ++ ret->m_height = static_cast(img.rows); ++ ret->m_chnum = static_cast(img.channels()); ++ ret->m_size = static_cast(img.cols*img.rows*img.channels()); ++ ret->m_image.resize(ret->m_size); ++ ++ for (unsigned int i = 0; i < ret->m_size; i++) ++ { ++ ret->m_image[i] = static_cast(img.data[i]); ++ } ++ return ret; ++} ++// to resize input tensor size ++std::vector ResizeBilinear(std::vector input, ++ const unsigned int inWidth, ++ const unsigned int inHeight, ++ const unsigned int inChnum, ++ const unsigned int outputWidth, ++ const unsigned int outputHeight) ++{ ++ std::vector out; ++ out.resize(outputWidth * outputHeight * 3); ++ ++ // We follow the definition of TensorFlow and AndroidNN: the top-left corner of a texel in the output ++ // image is projected into the input image to figure out the interpolants and weights. Note that this ++ // will yield different results than if projecting the centre of output texels. ++ ++ const unsigned int inputWidth = inWidth; ++ const unsigned int inputHeight = inHeight; ++ ++ // How much to scale pixel coordinates in the output image to get the corresponding pixel coordinates ++ // in the input image. 
++ const float scaleY = boost::numeric_cast(inputHeight) / boost::numeric_cast(outputHeight); ++ const float scaleX = boost::numeric_cast(inputWidth) / boost::numeric_cast(outputWidth); ++ ++ uint8_t rgb_x0y0[3]; ++ uint8_t rgb_x1y0[3]; ++ uint8_t rgb_x0y1[3]; ++ uint8_t rgb_x1y1[3]; ++ unsigned int pixelOffset00, pixelOffset10, pixelOffset01, pixelOffset11; ++ for (unsigned int y = 0; y < outputHeight; ++y) ++ { ++ // Corresponding real-valued height coordinate in input image. ++ const float iy = boost::numeric_cast(y) * scaleY; ++ // Discrete height coordinate of top-left texel (in the 2x2 texel area used for interpolation). ++ const float fiy = floorf(iy); ++ const unsigned int y0 = boost::numeric_cast(fiy); ++ ++ // Interpolation weight (range [0,1]) ++ const float yw = iy - fiy; ++ ++ for (unsigned int x = 0; x < outputWidth; ++x) ++ { ++ // Real-valued and discrete width coordinates in input image. ++ const float ix = boost::numeric_cast(x) * scaleX; ++ const float fix = floorf(ix); ++ const unsigned int x0 = boost::numeric_cast(fix); ++ ++ // Interpolation weight (range [0,1]). ++ const float xw = ix - fix; ++ ++ // Discrete width/height coordinates of texels below and to the right of (x0, y0). ++ const unsigned int x1 = std::min(x0 + 1, inputWidth - 1u); ++ const unsigned int y1 = std::min(y0 + 1, inputHeight - 1u); ++ ++ pixelOffset00 = x0 * inChnum + y0 * inputWidth * inChnum; ++ pixelOffset10 = x1 * inChnum + y0 * inputWidth * inChnum; ++ pixelOffset01 = x0 * inChnum + y1 * inputWidth * inChnum; ++ pixelOffset11 = x1 * inChnum + y1 * inputWidth * inChnum; ++ for (unsigned int c = 0; c < 3; ++c) ++ { ++ rgb_x0y0[c] = input[pixelOffset00+c]; ++ rgb_x1y0[c] = input[pixelOffset10+c]; ++ rgb_x0y1[c] = input[pixelOffset01+c]; ++ rgb_x1y1[c] = input[pixelOffset11+c]; ++ } ++ ++ for (unsigned c=0; c<3; ++c) ++ { ++ const float ly0 = Lerpfloat(float(rgb_x0y0[c]), float(rgb_x1y0[c]), xw); ++ const float ly1 = Lerpfloat(float(rgb_x0y1[c]), float(rgb_x1y1[c]), xw); ++ const float l = Lerpfloat(ly0, ly1, yw); ++ out[(3*((y*outputWidth)+x)) + c] = static_cast(l)/255.0f; ++ } ++ } ++ } ++ return out; ++} ++ ++namespace ++{ ++ ++ // Configure boost::program_options for command-line parsing and validation. ++ namespace po = boost::program_options; ++ ++ template ++ std::vector ParseArrayImpl(std::istream& stream, TParseElementFunc parseElementFunc) ++ { ++ std::vector result; ++ // Processes line-by-line. ++ std::string line; ++ while (std::getline(stream, line)) ++ { ++ std::vector tokens; ++ try ++ { ++ // Coverity fix: boost::split() may throw an exception of type boost::bad_function_call. ++ boost::split(tokens, line, boost::algorithm::is_any_of("\t ,;:"), boost::token_compress_on); ++ } ++ catch (const std::exception& e) ++ { ++ BOOST_LOG_TRIVIAL(error) << "An error occurred when splitting tokens: " << e.what(); ++ continue; ++ } ++ for (const std::string& token : tokens) ++ { ++ if (!token.empty()) ++ { ++ try ++ { ++ result.push_back(parseElementFunc(token)); ++ } ++ catch (const std::exception&) ++ { ++ BOOST_LOG_TRIVIAL(error) << "'" << token << "' is not a valid number. 
It has been ignored."; ++ } ++ } ++ } ++ } ++ ++ return result; ++ } ++ ++ template ++ std::vector ParseArray(std::istream& stream); ++ template<> ++ std::vector ParseArray(std::istream& stream) ++ { ++ return ParseArrayImpl(stream, ++ [](const std::string& s) { return boost::numeric_cast(std::stoi(s)); }); ++ } ++ void RemoveDuplicateDevices(std::vector& computeDevices) ++ { ++ // Mark the duplicate devices as 'Undefined'. ++ for (auto i = computeDevices.begin(); i != computeDevices.end(); ++i) ++ { ++ for (auto j = std::next(i); j != computeDevices.end(); ++j) ++ { ++ if (*j == *i) ++ { ++ *j = armnn::Compute::Undefined; ++ } ++ } ++ } ++ ++ // Remove 'Undefined' devices. ++ computeDevices.erase(std::remove(computeDevices.begin(), computeDevices.end(), armnn::Compute::Undefined), ++ computeDevices.end()); ++ } ++} // namespace ++ ++template ++int MainImpl(const char* modelPath, ++ bool isModelBinary, ++ const std::vector& computeDevices, ++ const char* inputName, ++ const armnn::TensorShape* inputTensorShape, ++ const char* inputTensorDataFilePath, ++ const char* outputName, ++ bool enableProfiling, ++ const size_t number_frame, ++ const std::shared_ptr& runtime = nullptr) ++{ ++ // Loads input tensor. ++ std::vector input; ++ std::vector input_resized; ++ using TContainer = mapbox::util::variant, std::vector, std::vector>; ++ ++ try ++ { ++ // Creates an InferenceModel, which will parse the model and load it into an IRuntime. ++ typename InferenceModel::Params params; ++ //const armnn::TensorShape inputTensorShape({ 1, 224, 224 3}); ++ ++ params.m_ModelPath = modelPath; ++ params.m_IsModelBinary = isModelBinary; ++ params.m_ComputeDevices = computeDevices; ++ params.m_InputBindings = { inputName }; ++ params.m_InputShapes = { *inputTensorShape }; ++ params.m_OutputBindings = { outputName }; ++ //params.m_EnableProfiling = enableProfiling; ++ params.m_SubgraphId = 0; ++ InferenceModel model(params, enableProfiling, runtime); ++ ++ VideoCapture cap; ++ int input_type = INPUT_IMAGE; ++ std::string filename = inputTensorDataFilePath; ++ ++ size_t i = filename.rfind("camera_live_input", filename.length()); ++ if (i != string::npos) ++ { ++ cap = VideoCapture(1); ++ namedWindow("ARMNN MobileNet Example", WINDOW_AUTOSIZE | CV_GUI_NORMAL); ++ input_type = INPUT_CAMERA; //camera input ++ } ++ else if((filename.substr(filename.find_last_of(".") + 1) == "mp4") || ++ (filename.substr(filename.find_last_of(".") + 1) == "mov") || ++ (filename.substr(filename.find_last_of(".") + 1) == "avi") ) ++ { ++ cap = VideoCapture(inputTensorDataFilePath); ++ if (! cap.isOpened()) ++ { ++ std::cout << "Cannot open video input: " << inputTensorDataFilePath << std::endl; ++ return (-1); ++ } ++ ++ namedWindow("ARMNN MobileNet Example", WINDOW_AUTOSIZE | CV_GUI_NORMAL); ++ input_type = INPUT_VIDEO; //video clip input ++ } ++ if (input_type != INPUT_IMAGE) ++ { ++ //set the callback function for any mouse event. Used for right click mouse to exit the program. 
++ setMouseCallback("ARMNN MobileNet Example", imagenetCallBackFunc, NULL); ++ } ++ ++ for (unsigned int i=0; i < number_frame; i++) ++ { ++ if (input_type != INPUT_IMAGE) ++ { ++ cap.grab(); ++ cap.retrieve(test_image); ++ } ++ std::unique_ptr inputData = loadImageData(inputTensorDataFilePath, cap, test_image, input_type); ++ input.resize(inputData->m_size); ++ ++ input = std::move(inputData->m_image); ++ input_resized = ResizeBilinear(input, inputData->m_width, inputData->m_height, inputData->m_chnum, 224, 224); ++ ++ // Set up input data container ++ std::vector inputDataContainer(1, std::move(input_resized)); ++ ++ // Set up output data container ++ std::vector outputDataContainers; ++ outputDataContainers.push_back(std::vector(model.GetOutputSize())); ++ ++ //profile start ++ predictStart = high_resolution_clock::now(); ++ // Execute model ++ model.Run(inputDataContainer, outputDataContainers); ++ //profile end ++ predictEnd = high_resolution_clock::now(); ++ ++ double timeTakenS = duration(predictEnd - predictStart).count(); ++ double preformance_ret = static_cast(1.0/timeTakenS); ++ ++ //retrieve output ++ std::vector& outputData = (mapbox::util::get>(outputDataContainers[0])); ++ //output TOP predictions ++ std::string predict_target_name; ++ // find the out with the highest confidence ++ int label = static_cast(std::distance(outputData.begin(), std::max_element(outputData.begin(), outputData.end()))); ++ std::fstream file("/usr/share/arm/armnn/models/labels.txt"); ++ //std::string predict_target_name; ++ for (int i=0; i <= label; i++) ++ { ++ std::getline(file, predict_target_name); ++ } ++ //get the probability of the top prediction ++ float prob = 100*outputData.data()[label]; ++ //clean the top one so as to find the second top prediction ++ outputData.data()[label] = 0; ++ std::cout << "Top(1) prediction is " << predict_target_name << " with confidence: " << prob << "%" << std::endl; ++ //output next TOP 4 predictions ++ for (int ii=1; ii<5; ii++) ++ { ++ std::string predict_target_name_n; ++ // find the out with the highest confidence ++ int label = static_cast(std::distance(outputData.begin(), std::max_element(outputData.begin(), outputData.end()))); ++ std::fstream file("/usr/share/arm/armnn/models/labels.txt"); ++ //std::string predict_target_name; ++ for (int i=0; i <= label; i++) ++ { ++ std::getline(file, predict_target_name_n); ++ } ++ //get the probability of the prediction ++ float prob = 100*outputData.data()[label]; ++ //clean the top one so as to find the second top prediction ++ outputData.data()[label] = 0; ++ ++ std::cout << "Top(" << (ii+1) << ") prediction is " << predict_target_name_n << " with confidence: " << prob << "%" << std::endl; ++ } ++ std::cout << "Performance (FPS): " << preformance_ret << std::endl; ++ ++ if (input_type != INPUT_IMAGE) ++ { ++ //convert image format back to BGR for OpenCV imshow from RGB format required by armnn. ++ cv::cvtColor(test_image, test_image, CV_RGB2BGR); ++ // output identified object name on top of input image ++ cv::putText(test_image, predict_target_name, ++ cv::Point(rectCrop.x + 5,rectCrop.y + 20), // Coordinates ++ cv::FONT_HERSHEY_COMPLEX_SMALL, // Font ++ 1.0, // Scale. 
2.0 = 2x bigger ++ cv::Scalar(0,0,255), // Color ++ 1, // Thickness ++ 8); // Line type ++ ++ // output preformance in FPS on top of input image ++ std::string preformance_ret_string = "Performance (FPS): " + boost::lexical_cast(preformance_ret); ++ cv::putText(test_image, preformance_ret_string, ++ cv::Point(rectCrop.x + 5,rectCrop.y + 40), // Coordinates ++ cv::FONT_HERSHEY_COMPLEX_SMALL, // Font ++ 1.0, // Scale. 2.0 = 2x bigger ++ cv::Scalar(0,0,255), // Color ++ 1, // Thickness ++ 8); // Line type ++ ++ cv::imshow("ARMNN MobileNet Example", test_image); ++ waitKey(2); ++ } ++ } ++ } ++ catch (armnn::Exception const& e) ++ { ++ BOOST_LOG_TRIVIAL(fatal) << "Armnn Error: " << e.what(); ++ return EXIT_FAILURE; ++ } ++ return EXIT_SUCCESS; ++} ++ ++// This will run a test ++int RunTest(const std::string& modelFormat, ++ const std::string& inputTensorShapeStr, ++ const vector& computeDevice, ++ const std::string& modelPath, ++ const std::string& inputName, ++ const std::string& inputTensorDataFilePath, ++ const std::string& outputName, ++ bool enableProfiling, ++ const size_t subgraphId, ++ const std::shared_ptr& runtime = nullptr) ++{ ++ // Parse model binary flag from the model-format string we got from the command-line ++ bool isModelBinary; ++ if (modelFormat.find("bin") != std::string::npos) ++ { ++ isModelBinary = true; ++ } ++ else if (modelFormat.find("txt") != std::string::npos || modelFormat.find("text") != std::string::npos) ++ { ++ isModelBinary = false; ++ } ++ else ++ { ++ BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat << "'. Please include 'binary' or 'text'"; ++ return EXIT_FAILURE; ++ } ++ ++ // Parse input tensor shape from the string we got from the command-line. ++ std::unique_ptr inputTensorShape; ++ if (!inputTensorShapeStr.empty()) ++ { ++ std::stringstream ss(inputTensorShapeStr); ++ std::vector dims = ParseArray(ss); ++ try ++ { ++ // Coverity fix: An exception of type armnn::InvalidArgumentException is thrown and never caught. 
++ inputTensorShape = std::make_unique(dims.size(), dims.data()); ++ } ++ catch (const armnn::InvalidArgumentException& e) ++ { ++ BOOST_LOG_TRIVIAL(fatal) << "Cannot create tensor shape: " << e.what(); ++ return EXIT_FAILURE; ++ } ++ } ++ // Forward to implementation based on the parser type ++ if (modelFormat.find("caffe") != std::string::npos) ++ { ++#if defined(ARMNN_CAFFE_PARSER) ++ return MainImpl(modelPath.c_str(), isModelBinary, computeDevice, ++ inputName.c_str(), inputTensorShape.get(), ++ inputTensorDataFilePath.c_str(), outputName.c_str(), ++ enableProfiling, subgraphId, runtime); ++#else ++ BOOST_LOG_TRIVIAL(fatal) << "Not built with Caffe parser support."; ++ return EXIT_FAILURE; ++#endif ++ } ++ else if (modelFormat.find("onnx") != std::string::npos) ++ { ++#if defined(ARMNN_ONNX_PARSER) ++ return MainImpl(modelPath.c_str(), isModelBinary, computeDevice, ++ inputName.c_str(), inputTensorShape.get(), ++ inputTensorDataFilePath.c_str(), outputName.c_str(), ++ enableProfiling, subgraphId, runtime); ++#else ++ BOOST_LOG_TRIVIAL(fatal) << "Not built with Onnx parser support."; ++ return EXIT_FAILURE; ++#endif ++ } ++ else if (modelFormat.find("tensorflow") != std::string::npos) ++ { ++#if defined(ARMNN_TF_PARSER) ++ return MainImpl(modelPath.c_str(), isModelBinary, computeDevice, ++ inputName.c_str(), inputTensorShape.get(), ++ inputTensorDataFilePath.c_str(), outputName.c_str(), ++ enableProfiling, subgraphId, runtime); ++#else ++ BOOST_LOG_TRIVIAL(fatal) << "Not built with Tensorflow parser support."; ++ return EXIT_FAILURE; ++#endif ++ } ++ else if(modelFormat.find("tflite") != std::string::npos) ++ { ++#if defined(ARMNN_TF_LITE_PARSER) ++ if (! isModelBinary) ++ { ++ BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat << "'. Only 'binary' format supported \ ++ for tflite files"; ++ return EXIT_FAILURE; ++ } ++ return MainImpl(modelPath.c_str(), isModelBinary, computeDevice, ++ inputName.c_str(), inputTensorShape.get(), ++ inputTensorDataFilePath.c_str(), outputName.c_str(), ++ enableProfiling, subgraphId, runtime); ++#else ++ BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat << ++ "'. Please include 'caffe', 'tensorflow', 'tflite' or 'onnx'"; ++ return EXIT_FAILURE; ++#endif ++ } ++ else ++ { ++ BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat << ++ "'. Please include 'caffe', 'tensorflow', 'tflite' or 'onnx'"; ++ return EXIT_FAILURE; ++ } ++} ++ ++int main(int argc, const char* argv[]) ++{ ++ // Configures logging for both the ARMNN library and this test program. 
++#ifdef NDEBUG ++ armnn::LogSeverity level = armnn::LogSeverity::Info; ++#else ++ armnn::LogSeverity level = armnn::LogSeverity::Debug; ++#endif ++ armnn::ConfigureLogging(true, true, level); ++ ++ std::string testCasesFile; ++ ++ std::string modelFormat = "tensorflow-binary"; ++ std::string modelPath = "/usr/share/arm/armnn/models/mobilenet_v1_1.0_224_frozen.pb"; ++ std::string inputName = "input"; ++ std::string inputTensorShapeStr = "1 224 224 3"; ++ std::string inputTensorDataFilePath = "/usr/share/arm/armnn/testvecs/test2.mp4"; ++ std::string outputName = "MobilenetV1/Predictions/Reshape_1"; ++ std::vector computeDevices = {armnn::Compute::CpuAcc}; ++ // Catch ctrl-c to ensure a clean exit ++ signal(SIGABRT, exit); ++ signal(SIGTERM, exit); ++ ++ if (argc == 1) ++ { ++ return RunTest(modelFormat, inputTensorShapeStr, computeDevices, ++ modelPath, inputName, inputTensorDataFilePath, outputName, false, 1000); ++ } ++ else ++ { ++ size_t subgraphId = 0; ++ po::options_description desc("Options"); ++ try ++ { ++ desc.add_options() ++ ("help", "Display usage information") ++ ("test-cases,t", po::value(&testCasesFile), "Path to a CSV file containing test cases to run. " ++ "If set, further parameters -- with the exception of compute device and concurrency -- will be ignored, " ++ "as they are expected to be defined in the file for each test in particular.") ++ ("concurrent,n", po::bool_switch()->default_value(false), ++ "Whether or not the test cases should be executed in parallel") ++ ("model-format,f", po::value(&modelFormat), ++ "caffe-binary, caffe-text, onnx-binary, onnx-text, tflite-binary, tensorflow-binary or tensorflow-text.") ++ ("model-path,m", po::value(&modelPath), "Path to model file, e.g. .caffemodel, .prototxt," ++ " .tflite, .onnx") ++ ("compute,c", po::value>()->multitoken(), ++ "The preferred order of devices to run layers on by default. Possible choices: CpuAcc, CpuRef, GpuAcc") ++ ("input-name,i", po::value(&inputName), "Identifier of the input tensor in the network.") ++ ("input-tensor-shape,s", po::value(&inputTensorShapeStr), ++ "The shape of the input tensor in the network as a flat array of integers separated by whitespace. " ++ "This parameter is optional, depending on the network.") ++ ("input-tensor-data,d", po::value(&inputTensorDataFilePath), ++ "Input test file name. It can be image/video clip file name or use 'camera_live_input' to select camera input.") ++ ("output-name,o", po::value(&outputName), "Identifier of the output tensor in the network.") ++ ("event-based-profiling,e", po::bool_switch()->default_value(false), ++ "Enables built in profiler. If unset, defaults to off.") ++ ("number_frame", po::value(&subgraphId)->default_value(1), "Number of frames to process."); ++ } ++ catch (const std::exception& e) ++ { ++ // Coverity points out that default_value(...) can throw a bad_lexical_cast, ++ // and that desc.add_options() can throw boost::io::too_few_args. ++ // They really won't in any of these cases. ++ BOOST_ASSERT_MSG(false, "Caught unexpected exception"); ++ BOOST_LOG_TRIVIAL(fatal) << "Fatal internal error: " << e.what(); ++ return EXIT_FAILURE; ++ } ++ ++ // Parses the command-line. ++ po::variables_map vm; ++ try ++ { ++ po::store(po::parse_command_line(argc, argv, desc), vm); ++ po::notify(vm); ++ } ++ catch (const po::error& e) ++ { ++ std::cerr << e.what() << std::endl << std::endl; ++ std::cerr << desc << std::endl; ++ return EXIT_FAILURE; ++ } ++ ++ // Run single test ++ // Get the preferred order of compute devices. 
++ std::vector computeDevices = vm["compute"].as>(); ++ bool enableProfiling = vm["event-based-profiling"].as(); ++ ++ // Remove duplicates from the list of compute devices. ++ RemoveDuplicateDevices(computeDevices); ++ ++ return RunTest(modelFormat, inputTensorShapeStr, computeDevices, ++ modelPath, inputName, inputTensorDataFilePath, outputName, enableProfiling, subgraphId); ++ } ++} ++ +-- +1.9.1 + diff --git a/0009-command-line-options-for-video-port-selection.patch b/0009-command-line-options-for-video-port-selection.patch new file mode 100644 index 0000000..bb6b505 --- /dev/null +++ b/0009-command-line-options-for-video-port-selection.patch @@ -0,0 +1,60 @@ +From ee152f3b68f91c5fff336306d011becdcf3a6b17 Mon Sep 17 00:00:00 2001 +From: Djordje Senicic +Date: Sat, 24 Aug 2019 17:58:38 -0400 +Subject: [PATCH] command line options for video port selection + +- Add command line selection <0|1|2|3> of video port used for live camera input + +Upstream-Status: Inappropriate [TI only test code] + +Signed-off-by: Djordje Senicic +--- + tests/ArmnnExamples/ArmnnExamples.cpp | 23 ++++++++++++++++++++--- + 1 file changed, 20 insertions(+), 3 deletions(-) + +diff --git a/tests/ArmnnExamples/ArmnnExamples.cpp b/tests/ArmnnExamples/ArmnnExamples.cpp +index 638fc145..d1526539 100644 +--- a/tests/ArmnnExamples/ArmnnExamples.cpp ++++ b/tests/ArmnnExamples/ArmnnExamples.cpp +@@ -316,10 +316,27 @@ int MainImpl(const char* modelPath, + int input_type = INPUT_IMAGE; + std::string filename = inputTensorDataFilePath; + +- size_t i = filename.rfind("camera_live_input", filename.length()); ++ size_t i = filename.rfind("camera_live_input", filename.length()); + if (i != string::npos) + { +- cap = VideoCapture(1); ++ int vport = 1; ++ size_t loc_i = filename.rfind("camera_live_input0", filename.length()); ++ if(loc_i != string::npos) vport = 0; ++ else { ++ loc_i = filename.rfind("camera_live_input1", filename.length()); ++ if(loc_i != string::npos) vport = 1; ++ else { ++ loc_i = filename.rfind("camera_live_input2", filename.length()); ++ if(loc_i != string::npos) vport = 2; ++ else { ++ loc_i = filename.rfind("camera_live_input3", filename.length()); ++ if(loc_i != string::npos) vport = 3; ++ else std::cout << "Setting ports beyond 3 not supported - using default!" << std::endl; ++ } ++ } ++ } ++ std::cout << "Using video" << vport << std::endl; ++ cap = VideoCapture(vport); + namedWindow("ARMNN MobileNet Example", WINDOW_AUTOSIZE | CV_GUI_NORMAL); + input_type = INPUT_CAMERA; //camera input + } +@@ -609,7 +626,7 @@ int main(int argc, const char* argv[]) + "The shape of the input tensor in the network as a flat array of integers separated by whitespace. " + "This parameter is optional, depending on the network.") + ("input-tensor-data,d", po::value(&inputTensorDataFilePath), +- "Input test file name. It can be image/video clip file name or use 'camera_live_input' to select camera input.") ++ "Input test file name. It can be image/video clip file name or 'camera_live_input or camera_live_input<0|1|2|3>' to select camera input.") + ("output-name,o", po::value(&outputName), "Identifier of the output tensor in the network.") + ("event-based-profiling,e", po::bool_switch()->default_value(false), + "Enables built in profiler. 
If unset, defaults to off.") +-- +2.17.1 + diff --git a/0010-armnnexamples-update-for-19.08-modifications.patch b/0010-armnnexamples-update-for-19.08-modifications.patch new file mode 100644 index 0000000..60f36a4 --- /dev/null +++ b/0010-armnnexamples-update-for-19.08-modifications.patch @@ -0,0 +1,28 @@ +From a3e266a2de7c45116428f4e21645a2657534191b Mon Sep 17 00:00:00 2001 +From: Djordje Senicic +Date: Mon, 26 Aug 2019 03:51:39 -0400 +Subject: [PATCH] armnnexamples: update for 19.08 modifications + +Upstream-Status: Inappropriate [TI only test code] + +Signed-off-by: Djordje Senicic +--- + tests/ArmnnExamples/ArmnnExamples.cpp | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/tests/ArmnnExamples/ArmnnExamples.cpp b/tests/ArmnnExamples/ArmnnExamples.cpp +index d1526539..c10a4fc0 100644 +--- a/tests/ArmnnExamples/ArmnnExamples.cpp ++++ b/tests/ArmnnExamples/ArmnnExamples.cpp +@@ -310,7 +310,7 @@ int MainImpl(const char* modelPath, + params.m_OutputBindings = { outputName }; + //params.m_EnableProfiling = enableProfiling; + params.m_SubgraphId = 0; +- InferenceModel model(params, enableProfiling, runtime); ++ InferenceModel model(params, enableProfiling, "", runtime); + + VideoCapture cap; + int input_type = INPUT_IMAGE; +-- +2.17.1 + diff --git a/_constraints b/_constraints new file mode 100644 index 0000000..3ce911a --- /dev/null +++ b/_constraints @@ -0,0 +1,10 @@ + + + + 6 + + + 4 + + + diff --git a/_multibuild b/_multibuild new file mode 100644 index 0000000..2b219ad --- /dev/null +++ b/_multibuild @@ -0,0 +1,3 @@ + + opencl + diff --git a/armnn-24.05.tar.gz b/armnn-24.05.tar.gz new file mode 100644 index 0000000..e8f7f13 --- /dev/null +++ b/armnn-24.05.tar.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b06c0b03d1447fb1c9222a8dc1f7fc0aac8dbd9defdf67087de2d305b1dbd323 +size 28629675 diff --git a/armnn-24.08.tar.gz b/armnn-24.08.tar.gz new file mode 100644 index 0000000..82d2a31 --- /dev/null +++ b/armnn-24.08.tar.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6af3453b6a0238f9734bbeb13e006f07f7a7a459a978a21423555819415fa328 +size 28695424 diff --git a/armnn-fix-armv7.patch b/armnn-fix-armv7.patch new file mode 100644 index 0000000..3a33d9d --- /dev/null +++ b/armnn-fix-armv7.patch @@ -0,0 +1,18 @@ +--- armnn-24.08/include/armnn/Numpy.hpp.orig 2024-09-06 11:05:16.800066800 +0200 ++++ armnn-24.08/include/armnn/Numpy.hpp 2024-09-06 11:05:58.717592900 +0200 +@@ -157,7 +157,7 @@ namespace armnnNumpy + inline void CreateHeader(std::ifstream& ifStream, HeaderInfo& headerInfo, Header& header) + { + char stringBuffer[headerInfo.m_HeaderLen]; +- ifStream.read(stringBuffer, headerInfo.m_HeaderLen); ++ ifStream.read(stringBuffer, static_cast(headerInfo.m_HeaderLen)); + + header.m_HeaderString = std::string(stringBuffer, headerInfo.m_HeaderLen); + // Remove new line character at the end of the string +@@ -403,4 +403,4 @@ namespace armnnNumpy + } + } + +-#endif // NUMPY_HPP +\ No newline at end of file ++#endif // NUMPY_HPP diff --git a/armnn-fix_find_opencv.patch b/armnn-fix_find_opencv.patch new file mode 100644 index 0000000..68d6b94 --- /dev/null +++ b/armnn-fix_find_opencv.patch @@ -0,0 +1,19 @@ +--- armnn-19.08.orig/tests/CMakeLists.txt 2019-10-17 09:11:02.836949176 +0200 ++++ armnn-19.08/tests/CMakeLists.txt 2019-10-17 09:10:50.384869262 +0200 +@@ -1,6 +1,3 @@ +-find_package( OpenCV REQUIRED ) +-include_directories( ${OpenCV_INCLUDE_DIRS} ) +- + # UnitTests + include(CheckIncludeFiles) + +@@ -368,6 +365,9 @@ 
if(BUILD_ARMNN_QUANTIZER)
+ endif()
+
+ if (BUILD_ARMNN_EXAMPLES)
++ find_package( OpenCV REQUIRED )
++ include_directories( ${OpenCV_INCLUDE_DIRS} )
++
+ set(ArmnnExamples_sources
+ ArmnnExamples/ArmnnExamples.cpp)
+
diff --git a/armnn-rpmlintrc b/armnn-rpmlintrc
new file mode 100644
index 0000000..2eac815
--- /dev/null
+++ b/armnn-rpmlintrc
@@ -0,0 +1,4 @@
+# This line is mandatory to access the configuration functions
+from Config import *
+
+addFilter(".*opencl.* shlib-policy-name-error")
diff --git a/armnn.changes b/armnn.changes
new file mode 100644
index 0000000..f781228
--- /dev/null
+++ b/armnn.changes
@@ -0,0 +1,597 @@
+-------------------------------------------------------------------
+Fri Sep 6 13:29:47 UTC 2024 - Guillaume GARDET
+
+- Add patch to fix build on armv7:
+ * armnn-fix-armv7.patch
+
+-------------------------------------------------------------------
+Tue Sep 3 09:59:35 UTC 2024 - Guillaume GARDET
+
+- Update to 24.08:
+ * Changelog: https://github.com/ARM-software/armnn/releases/tag/v24.08
+
+-------------------------------------------------------------------
+Tue Jun 18 07:40:01 UTC 2024 - Guillaume GARDET
+
+- Update to 24.05:
+ * Changelog: https://github.com/ARM-software/armnn/releases/tag/v24.05
+
+-------------------------------------------------------------------
+Wed Apr 3 09:21:48 UTC 2024 - Guillaume GARDET
+
+- Update to 24.02:
+ * Changelog: https://github.com/ARM-software/armnn/releases/tag/v24.02
+
+-------------------------------------------------------------------
+Tue Feb 20 10:17:12 UTC 2024 - Dominique Leuenberger
+
+- Use %patch -P N instead of deprecated %patchN.
+
+-------------------------------------------------------------------
+Thu Dec 7 09:09:13 UTC 2023 - Guillaume GARDET
+
+- Update to 23.11:
+ * Changelog: https://github.com/ARM-software/armnn/releases/tag/v23.11
+
+-------------------------------------------------------------------
+Tue Sep 5 14:29:53 UTC 2023 - Guillaume GARDET
+
+- Update to 23.08:
+ * Changelog: https://github.com/ARM-software/armnn/releases/tag/v23.08
+
+-------------------------------------------------------------------
+Mon Jun 12 12:28:32 UTC 2023 - Guillaume GARDET
+
+- Update to 23.05:
+ * Changelog: https://github.com/ARM-software/armnn/releases/tag/v23.05
+- Drop upstream patches:
+ * armnn-fix-gcc13.patch
+ * armnn-fix-gcc13-2.patch
+ * 4cf40d7.diff
+
+-------------------------------------------------------------------
+Thu Apr 20 12:48:18 UTC 2023 - Guillaume GARDET
+
+- Add additional gcc13 fixes:
+ * 4cf40d7.diff
+ * armnn-fix-gcc13-2.patch
+
+-------------------------------------------------------------------
+Wed Apr 5 06:06:27 UTC 2023 - Guillaume GARDET
+
+- Update armnn-fix-gcc13.patch with upstream patch
+
+-------------------------------------------------------------------
+Tue Mar 21 10:45:26 UTC 2023 - Guillaume GARDET
+
+- Add patch to fix build with GCC13:
+ * armnn-fix-gcc13.patch
+
+-------------------------------------------------------------------
+Wed Mar 15 09:03:42 UTC 2023 - Guillaume GARDET
+
+- Update to 23.02:
+ * Changelog: https://github.com/ARM-software/armnn/releases/tag/v23.02
+- Drop upstream patches:
+ * armnn-gh711.patch
+ * armnn-281e97b.patch
+
+-------------------------------------------------------------------
+Wed Dec 14 15:41:22 UTC 2022 - Guillaume GARDET
+
+- tensorflow-lite >= 2.10 is only available on Tumbleweed
+
+-------------------------------------------------------------------
+Wed Dec 14 15:38:39 UTC 2022 - Guillaume GARDET
+
+- Add patch to use static libraries not object libraries for
+ support library:
+ * armnn-281e97b.patch
+
+-------------------------------------------------------------------
+Mon Nov 28 07:40:40 UTC 2022 - Guillaume GARDET
+
+- Update to 22.11:
+ * Changelog: https://github.com/ARM-software/armnn/releases/tag/v22.11
+ * Add libarmnnTestUtils
+- Add patch to fix build:
+ * armnn-gh711.patch
+
+-------------------------------------------------------------------
+Tue Aug 30 09:19:52 UTC 2022 - Guillaume GARDET
+
+- Update to 22.08:
+ * Changelog: https://github.com/ARM-software/armnn/releases/tag/v22.08
+- Drop upstream patch:
+ * armnn-fix-gcc12.patch
+
+-------------------------------------------------------------------
+Tue May 31 13:31:55 UTC 2022 - Guillaume GARDET
+
+- Update to 22.05:
+ * Changelog: https://github.com/ARM-software/armnn/releases/tag/v22.05
+- Add patch to fix build with GCC12 and new flatbuffers:
+ * armnn-fix-gcc12.patch
+
+-------------------------------------------------------------------
+Tue Mar 15 13:06:29 UTC 2022 - Guillaume GARDET
+
+- Update to 22.02:
+ * Changelog: https://github.com/ARM-software/armnn/releases/tag/v22.02
+ * Add libarmnnTestUtils
+
+-------------------------------------------------------------------
+Wed Dec 1 10:03:31 UTC 2021 - Guillaume GARDET
+
+- Update to 21.11:
+ * Changelog: https://github.com/ARM-software/armnn/releases/tag/v21.11
+ * Main changes:
+ - New capabilities and improved performance
+ - ABI/API Changes
+ The following front-end API changes have occurred during the
+ implementation of 21.11 that users should be aware of before
+ upgrading. Due to these changes we have bumped our ARMNN_VERSION
+ to 27.0.0, the Delegate to 25.0.0, and also bumped
+ our Parsers to 24.3.0 following Semantic Versioning guidelines.
+- Drop upstream patches:
+ * 96beb97.diff
+ * febc20f.diff
+ * e118e04.diff
+ * 0011-update-doctest-for-glibc2.34.patch
+
+-------------------------------------------------------------------
+Mon Oct 25 07:21:27 UTC 2021 - Guillaume GARDET
+
+- Add upstream patch to fix stringop-overread error:
+ * e118e04.diff
+
+-------------------------------------------------------------------
+Thu Oct 21 15:24:42 UTC 2021 - Guillaume GARDET
+
+- Add upstream patch to fix uninitialized var error:
+ * febc20f.diff
+- Remove most -Wno-error* flags which are not needed anymore
+
+-------------------------------------------------------------------
+Wed Oct 20 08:41:14 UTC 2021 - Atri Bhattacharya
+
+- Add 0011-update-doctest-for-glibc2.34.patch: Update doctest to
+ version 2.4.6 to allow Arm NN to build with glibc 2.34; patch
+ taken from upstream commits 13d2e0d and 4ec6d42, and rebased for
+ the current version.
+
+-------------------------------------------------------------------
+Thu Sep 9 08:04:50 UTC 2021 - Guillaume GARDET
+
+- Update to 21.08:
+ * Changelog: https://github.com/ARM-software/armnn/releases/tag/v21.08
+ * Main changes:
+ - New capabilities and improved performance:
+ * Added the ability to import protected DMA Buffers and
+ allow Arm NN to run inferences that are in Protected GPU Memory,
+ as well as providing a Custom Memory Allocator which supports
+ importing malloc, Dma_buf and protected Dma buffers.
+ * Users with multi-core NPUs have been given the ability to pin
+ inferences to selected cores, giving them the ability to balance
+ parallel workloads across the NPU and increase throughput.
+ * Boost has been completely removed from the code base, making
+ Arm NN easier to integrate into other software stacks.
+ * Added support for non-constant weights and biases on
+ FullyConnected, which lays the groundwork for supporting more models.
+ * More operators supported on Arm NN, TfLite Parser, TfLite
+ Delegate and Android NNAPI driver.
+- Drop upstream patch:
+ * armnn-fix-include.patch
+
+-------------------------------------------------------------------
+Fri Jul 16 08:23:43 UTC 2021 - Guillaume GARDET
+
+- Add '-Wno-error=stringop-overread' to work around a build error
+ with GCC11 with openCL enabled
+
+-------------------------------------------------------------------
+Tue Jul 13 08:07:14 UTC 2021 - Guillaume GARDET
+
+- Add '-Wno-error=uninitialized -Wno-error=array-bounds' to
+ work around build issues with GCC11 - https://github.com/ARM-software/armnn/issues/548
+
+-------------------------------------------------------------------
+Tue May 25 06:49:01 UTC 2021 - Guillaume GARDET
+
+- Fix libarmnnOnnxParser version
+
+-------------------------------------------------------------------
+Fri May 21 06:55:48 UTC 2021 - Guillaume GARDET
+
+- Update to 21.05:
+ * Changelog: https://github.com/ARM-software/armnn/releases/tag/v21.05
+ * Main changes:
+ - New capabilities to allow users to attain higher performance by:
+ * Making the Arm NN Core thread safe, opening the possibility of
+ running multiple inferences on the same model in parallel
+ software threads.
+ * Allowing graphs on the GPU backend to import their input and
+ output buffers either from correctly aligned main memory or
+ from kernel memory exposed as a dma_buf, thus reducing memory
+ usage and saving the time involved in copying data into and
+ out of the GPU memory space.
+ - In addition to this, support was added to allow the MobileBERT
+ network to be parsed and run.
+ - Finally, three deprecated components (the Tensorflow Parser,
+ the Caffe Parser and the Arm NN Quantizer tool) were removed
+- Add patch to fix include path:
+ * armnn-fix-include.patch
+- Disable armnn-extratests as it fails to build with the current version
+
+-------------------------------------------------------------------
+Mon Mar 29 12:17:08 UTC 2021 - Guillaume GARDET
+
+- Update to 21.02:
+ * Changelog: https://github.com/ARM-software/armnn/releases/tag/v21.02
+- Drop upstream patch:
+ * c5c40fe.diff
+
+-------------------------------------------------------------------
+Mon Feb 15 09:48:06 UTC 2021 - Guillaume GARDET
+
+- Fix installation of cmake files with an upstream patch:
+ * c5c40fe.diff
+
+-------------------------------------------------------------------
+Mon Jan 25 08:21:39 UTC 2021 - Guillaume GARDET
+
+- Backport patch to fix tflite parser when built against
+ tensorflow 2.4:
+ * 96beb97.diff
+
+-------------------------------------------------------------------
+Wed Dec 9 14:33:19 UTC 2020 - Guillaume GARDET
+
+- Update to 20.11:
+ * Changelog: https://github.com/ARM-software/armnn/releases/tag/v20.11
+- Refresh patch to handle boost::variant to mapbox::util::variant update:
+ * 0006-armnn-mobilenet-test-example.patch
+
+-------------------------------------------------------------------
+Fri Sep 4 16:07:09 UTC 2020 - Guillaume GARDET
+
+- Add python support, disabled for now as it does not install
+ files atm
+
+-------------------------------------------------------------------
+Fri Sep 4 14:56:36 UTC 2020 - Guillaume GARDET
+
+- Package libtimelineDecoderJson
+
+-------------------------------------------------------------------
+Thu Sep 3 11:18:40 UTC 2020 - Guillaume GARDET
+
+- Update to 20.08:
+ * Changelog: https://github.com/ARM-software/armnn/releases/tag/v20.08
+- Refresh patch:
+ * 0005-add-armnn-mobilenet-test-example.patch
+- Drop upstream patch:
+ * armnn-fix-catch.patch
+- Disable ONNX on Tumbleweed since there is a compatibility issue
+ with ONNX 1.7.0, see: https://github.com/ARM-software/armnn/issues/419
+
+-------------------------------------------------------------------
+Wed Jun 17 07:19:10 UTC 2020 - Guillaume GARDET
+
+- Build only where ComputeLibrary is built:
+ aarch64, armv7 and x86_64
+
+-------------------------------------------------------------------
+Thu Jun 4 09:51:23 UTC 2020 - Guillaume GARDET
+
+- Unify Leap/SLE options
+- Update to 20.05:
+ * Changelog: https://github.com/ARM-software/armnn/releases/tag/v20.05
+- Drop upstream patch:
+ * armnn-enable-use-of-arm-compute-shared-library.patch
+- Refresh patch:
+ * 0003-add-more-test-command-line-arguments.patch
+- Add new patch to fix build with gcc10:
+ * armnn-fix-catch.patch
+- Package new libtimelineDecoder and libarmnnBasePipeServer
+
+-------------------------------------------------------------------
+Thu May 28 15:10:11 UTC 2020 - Guillaume GARDET
+
+- Apply boost link fixes only for armnn_extra_tests
+- Apply patches for armnn_extra_tests only when
+ armnn_extra_tests is built
+
+-------------------------------------------------------------------
+Tue Mar 31 15:23:29 UTC 2020 - Guillaume GARDET
+
+- Disable RPATH to fix build on Leap15.2/SLE15SP2
+
+-------------------------------------------------------------------
+Tue Mar 24 14:01:29 UTC 2020 - Guillaume GARDET
+
+- Add '-Wno-error=deprecated-declarations' to fix build with latest
+ protobuf 3.11.x (Tumbleweed)
+ https://github.com/ARM-software/armnn/issues/366
+- Use python-onnx package instead of python-onnx-devel, since it
+ is now a single package.
+
+-------------------------------------------------------------------
+Wed Mar 11 08:41:20 UTC 2020 - Guillaume GARDET
+
+- Fix build on Leap 15.2
+
+-------------------------------------------------------------------
+Mon Mar 9 17:47:42 UTC 2020 - Guillaume GARDET
+
+- Update to 20.02:
+ * Changelog: https://github.com/ARM-software/armnn/releases/tag/v20.02
+ * Drop upstream patch:
+ - armnn-fix_include.patch
+ * Refresh patch:
+ - 0006-armnn-mobilenet-test-example.patch
+ * Replace downstream patch:
+ - 0007-enable-use-of-arm-compute-shared-library.patch
+ by upstream patch:
+ - armnn-enable-use-of-arm-compute-shared-library.patch
+ * Add 'vim' as BuildRequires since the build needs the 'xxd' tool
+- Use TensorFlow 2 also on Leap 15.2 / SLE15SP2
+
+-------------------------------------------------------------------
+Mon Feb 10 13:01:53 UTC 2020 - Guillaume GARDET
+
+- Use Tensorflow 2.x only for Tumbleweed, as TensorFlow2 is not
+ available in Leap 15.2
+- Skip some tests if TensorFlow < 1.14 is used
+
+-------------------------------------------------------------------
+Tue Jan 28 12:33:19 UTC 2020 - Guillaume GARDET
+
+- Enable ONNX on Leap 15.2
+
+-------------------------------------------------------------------
+Tue Jan 28 09:48:55 UTC 2020 - Guillaume GARDET
+
+- Use tensorflow2 as armnn 19.11 now requires TensorFlow >= 1.14
+
+-------------------------------------------------------------------
+Mon Jan 20 15:21:09 UTC 2020 - Guillaume GARDET
+
+- Disable ArmnnConverter and ArmnnQuantizer packaging by default
+ as it requires libQuantizer.so to be packaged, which is not yet the case
+
+-------------------------------------------------------------------
+Tue Jan 14 13:07:58 UTC 2020 - Guillaume GARDET
+
+- Package ArmnnConverter and ArmnnQuantizer
+
+-------------------------------------------------------------------
+Tue Jan 14 11:59:14 UTC 2020 - Dominique Leuenberger
+
+- ExcludeArch %ix86: Parts of the build deps (tensorflow/bazel) are
+ not supported on ix86.
+
+ +------------------------------------------------------------------- +Fri Dec 6 13:45:57 UTC 2019 - Guillaume GARDET + +- Update to 19.11: + * Changelog: https://github.com/ARM-software/armnn/releases/tag/v19.11 +- Remove upstreamed patches: + * armnn-generate-versioned-library.patch + * armnn-fix_boost.patch + * armnn-fix_arm32_dep.patch + * armnn-fix_arm32.patch +- Add patch to fix include: + * armnn-fix_include.patch + +------------------------------------------------------------------- +Mon Nov 25 14:16:29 UTC 2019 - Guillaume GARDET + +- tensorflow-devel package now includes *.pb.* files, so + remove TensorFlow parser build workaround: + * armnn-fix_tensorflow_link.patch + +------------------------------------------------------------------- +Mon Oct 28 15:38:27 UTC 2019 - Guillaume GARDET + +- Add a conflict between armnn-extratests and armnn-opencl-extratests + +------------------------------------------------------------------- +Mon Oct 28 13:06:56 UTC 2019 - Guillaume GARDET + +- Add a conflict between armnn-devel and armnn-opencl-devel + +------------------------------------------------------------------- +Mon Oct 28 09:56:16 UTC 2019 - Guillaume GARDET + +- Enable ONNX for Tumbleweed + +------------------------------------------------------------------- +Thu Oct 17 06:57:00 UTC 2019 - Guillaume GARDET + +- Add downstream ArmnnExamples in a separate '-extratests' package + with patches: + * 0003-add-more-test-command-line-arguments.patch + * 0005-add-armnn-mobilenet-test-example.patch + * 0006-armnn-mobilenet-test-example.patch + * 0007-enable-use-of-arm-compute-shared-library.patch + * 0009-command-line-options-for-video-port-selection.patch + * 0010-armnnexamples-update-for-19.08-modifications.patch +- Fix build when extratests are disabled + * armnn-fix_find_opencv.patch + +------------------------------------------------------------------- +Mon Oct 7 13:00:59 UTC 2019 - Guillaume GARDET + +- Replace patch with an upstreamable version: + * armnn-generate-versioned-library.patch + +------------------------------------------------------------------- +Tue Oct 1 19:04:50 UTC 2019 - Guillaume GARDET + +- Add ONNX Parser support (disabled by default as there is no + official ONNX package yet) + +------------------------------------------------------------------- +Tue Oct 1 14:30:13 UTC 2019 - Guillaume GARDET + +- Re-enable Tensorflow on Tumbleweed as boo#1151150 is now fixed + +------------------------------------------------------------------- +Thu Sep 19 07:21:48 UTC 2019 - Guillaume GARDET + +- Update _constraints + +------------------------------------------------------------------- +Wed Sep 18 12:26:55 UTC 2019 - Guillaume GARDET + +- Enable openCL only on a separate flavor as runtime requires + libOpenCL.so, libGLES_mali.so, or libmali.so + +------------------------------------------------------------------- +Wed Sep 18 11:53:30 UTC 2019 - Guillaume GARDET + +- Enable openCL support +- Disable UnitTests when openCL support is enabled as it fails + on OBS workers + +------------------------------------------------------------------- +Wed Sep 18 09:57:39 UTC 2019 - Guillaume GARDET + +- Add patches to make UnitTests to pass on armv7: + * armnn-fix_arm32.patch + * armnn-fix_arm32_dep.patch + +------------------------------------------------------------------- +Thu Sep 5 11:22:41 UTC 2019 - Guillaume GARDET + +- Disable TensorFlow as on 15.1 only x86_64 succeed and on TW we + have incompatibility with protobuf (3.8.0 in TW and + Tensorflow uses 3.6.1 internally) - boo#1151150 + 
+-------------------------------------------------------------------
+Thu Sep 5 07:25:16 UTC 2019 - Guillaume GARDET
+
+- Update to 19.08:
+- Changelog: https://github.com/ARM-software/armnn/releases/tag/v19.08
+- Remove upstreamed patches:
+ * armnn-fix_quantizer_link.patch
+ * armnn-fix_caffe_parser_with_new_protobuf.patch
+- Refresh patch:
+ * armnn-generate-versioned-library.patch
+- Drop patches not needed anymore:
+ * armnn-remove_broken_std_move.patch
+ * armnn-fix_build_with_gcc9.patch
+
+-------------------------------------------------------------------
+Wed Sep 4 16:15:58 UTC 2019 - Guillaume GARDET
+
+- Disable LTO until LTO link is fixed
+ https://github.com/ARM-software/armnn/issues/251
+
+-------------------------------------------------------------------
+Tue Jun 18 11:54:48 UTC 2019 - Guillaume GARDET
+
+- Fix build in Tumbleweed, with latest protobuf:
+ * armnn-fix_caffe_parser_with_new_protobuf.patch
+
+-------------------------------------------------------------------
+Tue Jun 11 14:17:34 UTC 2019 - Guillaume GARDET
+
+- Enable Tensorflow parser
+- Fix link with Tensorflow:
+ * armnn-fix_tensorflow_link.patch
+
+-------------------------------------------------------------------
+Wed Jun 5 09:07:12 UTC 2019 - Guillaume GARDET
+
+- Build and package libarmnnTfLiteParser
+- Fix libarmnnQuantizer build with:
+ * armnn-fix_quantizer_link.patch
+
+-------------------------------------------------------------------
+Tue Jun 4 09:22:24 UTC 2019 - Guillaume GARDET
+
+- Add _constraints to avoid OOM errors
+
+-------------------------------------------------------------------
+Mon Jun 3 11:40:47 UTC 2019 - Guillaume GARDET
+
+- Update to 19.05:
+- Changelog: https://github.com/ARM-software/armnn/releases/tag/v19.05
+- Remove upstreamed patch:
+ * armnn-fix_stb_include.patch
+- Rebase patch:
+ * armnn-generate-versioned-library.patch
+- Update patch:
+ * armnn-remove_broken_std_move.patch
+
+-------------------------------------------------------------------
+Mon Jun 3 09:15:25 UTC 2019 - Guillaume GARDET
+
+- Fix build on Tumbleweed with:
+ * armnn-fix_build_with_gcc9.patch
+
+-------------------------------------------------------------------
+Wed May 29 15:19:15 UTC 2019 - Guillaume GARDET
+
+- Fix build on Tumbleweed with:
+ * armnn-remove_broken_std_move.patch
+
+-------------------------------------------------------------------
+Wed May 29 08:12:43 UTC 2019 - Guillaume GARDET
+
+- Call ldconfig in post/postun for libarmnnCaffeParser
+
+-------------------------------------------------------------------
+Mon May 27 05:42:06 UTC 2019 - Guillaume GARDET
+
+- Enable and fix Caffe parser
+- Use %cmake_build macro
+
+-------------------------------------------------------------------
+Fri Apr 19 10:11:29 UTC 2019 - Guillaume GARDET
+
+- Add compute_cl option, disabled by default since the check fails as
+ no openCL is enabled in OBS
+
+-------------------------------------------------------------------
+Thu Apr 11 09:35:54 UTC 2019 - Guillaume GARDET
+
+- Split libarmnn and libarmnnSerializer to separate packages
+
+-------------------------------------------------------------------
+Thu Apr 11 07:38:23 UTC 2019 - Guillaume GARDET
+
+- Add patch to have versioned libs:
+ * armnn-generate-versioned-library.patch
+- Package versioned libs
+
+-------------------------------------------------------------------
+Tue Apr 9 16:18:55 UTC 2019 - Guillaume GARDET
+
+- Enable NEON backend on AArch64
+- Add patch to enable use of shared lib for ComputeLibrary:
0007-enable-use-of-arm-compute-shared-library.patch
+
+-------------------------------------------------------------------
+Tue Mar 26 14:29:39 UTC 2019 - Guillaume GARDET
+
+- Update to 19.02
+- Remove upstreamed patch:
+ * armnn-fix_catching_polymorphic_type.patch
+
+-------------------------------------------------------------------
+Thu Jan 17 12:51:41 UTC 2019 - Guillaume GARDET
+
+- Update to 18.11
+- Add patches to fix build:
+ * armnn-fix_catching_polymorphic_type.patch
+ * armnn-fix_boost.patch
+ * armnn-fix_stb_include.patch
+
+-------------------------------------------------------------------
+Thu Sep 13 12:51:53 UTC 2018 - Guillaume GARDET
+
+- Initial version 18.08
+
diff --git a/armnn.spec b/armnn.spec
new file mode 100644
index 0000000..d1ec4e6
--- /dev/null
+++ b/armnn.spec
@@ -0,0 +1,623 @@
+#
+# spec file for package armnn
+#
+# Copyright (c) 2024 SUSE LLC
+#
+# All modifications and additions to the file contributed by third parties
+# remain the property of their copyright owners, unless otherwise agreed
+# upon. The license for this file, and modifications and additions to the
+# file, is the same license as for the pristine package itself (unless the
+# license for the pristine package is not an Open Source License, in which
+# case the license is the MIT License). An "Open Source License" is a
+# license that conforms to the Open Source Definition (Version 1.9)
+# published by the Open Source Initiative.
+
+# Please submit bugfixes or comments via https://bugs.opensuse.org/
+#
+
+
+# Disable LTO until UnitTests passes with LTO enabled - https://github.com/ARM-software/armnn/issues/623
+%define _lto_cflags %{nil}
+
+# Disable Python binding for now
+%bcond_with PyArmnn
+
+%define target @BUILD_FLAVOR@%{nil}
+%if "%{target}" != ""
+%define package_suffix -%{target}
+%endif
+# Compute Library has NEON enabled for aarch64 only
+%ifarch aarch64
+%bcond_without compute_neon
+%else
+%bcond_with compute_neon
+%endif
+%if "%{target}" == "opencl"
+%bcond_without compute_cl
+%else
+%bcond_with compute_cl
+%endif
+# stb-devel is available on Leap 15.1+
+%if 0%{?suse_version} > 1500 || ( 0%{?sle_version} > 150000 && 0%{?is_opensuse} )
+%bcond_without armnn_tests
+%else
+%bcond_with armnn_tests
+%endif
+# Extra tests require opencv(3)-devel, but it is broken for Leap 15.1 - boo#1154091
+%if 0%{?suse_version} > 1500 || 0%{?sle_version} >= 150200
+# FIXME: disabled for now, as it fails since version 21.05
+%bcond_with armnn_extra_tests
+%else
+%bcond_with armnn_extra_tests
+%endif
+# flatbuffers-devel is available on Leap 15.2+/SLE15SP2+
+# But tensorflow-lite >= 2.10 is only available on Tumbleweed
+%if 0%{?suse_version} > 1500
+%bcond_without armnn_flatbuffers
+%else
+%bcond_with armnn_flatbuffers
+%endif
+# ONNX is available on Leap 15.2+/SLE15SP2+, but there is a compatibility issue
+# with ONNX 1.7.0 in Tumbleweed - https://github.com/ARM-software/armnn/issues/419
+%if 0%{?sle_version} >= 150200
+%bcond_without armnn_onnx
+%else
+%bcond_with armnn_onnx
+%endif
+%define version_major 24
+%define version_minor 08
+%define version_lib 33
+%define version_lib_testutils 3
+%define version_lib_tfliteparser 24
+%define version_lib_onnxparser 24
+Name: armnn%{?package_suffix}
+Version: %{version_major}.%{version_minor}
+Release: 0
+Summary: Arm NN SDK enables machine learning workloads on power-efficient devices
+License: MIT
+Group: Development/Libraries/Other
+URL: https://developer.arm.com/products/processors/machine-learning/arm-nn
+Source0: 
https://github.com/ARM-software/armnn/archive/v%{version}.tar.gz#/armnn-%{version}.tar.gz
+Source1: armnn-rpmlintrc
+# PATCH-FIX-UPSTREAM - https://github.com/ARM-software/armnn/issues/786
+Patch1: armnn-fix-armv7.patch
+# PATCHES to add downstream ArmnnExamples binary - https://layers.openembedded.org/layerindex/recipe/87610/
+Patch200: 0003-add-more-test-command-line-arguments.patch
+Patch201: 0005-add-armnn-mobilenet-test-example.patch
+Patch202: 0006-armnn-mobilenet-test-example.patch
+Patch203: 0009-command-line-options-for-video-port-selection.patch
+Patch204: 0010-armnnexamples-update-for-19.08-modifications.patch
+Patch205: armnn-fix_find_opencv.patch
+BuildRequires: ComputeLibrary-devel >= %{version_major}.%{version_minor}
+BuildRequires: cmake >= 3.22
+BuildRequires: gcc-c++
+BuildRequires: protobuf-devel
+BuildRequires: python-rpm-macros
+BuildRequires: valgrind-devel
+BuildRequires: vim
+# Make armnn-opencl pull the lib*-opencl libs, and armnn the non-openCL libs
+Requires: libarmnn%{version_lib}%{?package_suffix} = %{version}
+ExcludeArch: %ix86
+%if 0%{?suse_version} < 1330
+BuildRequires: boost-devel >= 1.59
+%else
+BuildRequires: libboost_filesystem-devel >= 1.59
+BuildRequires: libboost_program_options-devel >= 1.59
+BuildRequires: libboost_system-devel >= 1.59
+BuildRequires: libboost_test-devel >= 1.59
+%if %{with armnn_extra_tests}
+BuildRequires: libboost_log-devel >= 1.59
+BuildRequires: libboost_thread-devel >= 1.59
+%endif
+%endif
+%if %{with armnn_flatbuffers}
+BuildRequires: flatbuffers-devel
+%if 0%{?suse_version} > 1550
+BuildRequires: tensorflow-lite-devel >= 2.10
+%else
+BuildRequires: tensorflow2-lite-devel >= 2.10
+%endif
+%endif
+%if %{with compute_cl}
+# Mesa-libOpenCL is required for tests
+BuildRequires: Mesa-libOpenCL
+BuildRequires: ocl-icd-devel
+BuildRequires: opencl-cpp-headers
+BuildRequires: opencl-headers
+%endif
+%if %{with armnn_extra_tests}
+%if 0%{?suse_version} > 1500
+BuildRequires: opencv3-devel
+%else
+BuildRequires: opencv-devel
+%endif
+%endif
+%if %{with armnn_onnx}
+BuildRequires: python3-onnx
+%endif
+%if %{with armnn_tests}
+BuildRequires: stb-devel
+%endif
+%if %{with PyArmnn}
+BuildRequires: python3-devel
+BuildRequires: python3-wheel
+BuildRequires: swig >= 4
+%endif
+%if %{with compute_cl}
+Recommends: Mesa-libOpenCL
+%endif
+%if %{with armnn_flatbuffers}
+Requires: libarmnnSerializer%{version_lib}%{?package_suffix} = %{version}
+Requires: libarmnnTfLiteParser%{version_lib_tfliteparser}%{?package_suffix} = %{version}
+%endif
+%if %{with armnn_onnx}
+Requires: libarmnnOnnxParser%{version_lib_onnxparser}%{?package_suffix} = %{version}
+%endif
+# Make sure we do not install both openCL and non-openCL (CPU only) versions.
+%if "%{target}" == "opencl"
+Conflicts: armnn
+%else
+Conflicts: armnn-opencl
+%endif
+ExclusiveArch: aarch64 armv7l armv7hl x86_64
+
+%description
+Arm NN is an inference engine for CPUs, GPUs and NPUs.
+It bridges the gap between existing NN frameworks and the underlying IP.
+It enables efficient translation of existing neural network frameworks,
+such as TensorFlow Lite, allowing them to run efficiently – without
+modification – across Arm Cortex CPUs and Arm Mali GPUs.
+
+%package devel
+Summary: Development headers and libraries for armnn
+Group: Development/Libraries/C and C++
+Requires: %{name} = %{version}
+Requires: libarmnn%{version_lib}%{?package_suffix} = %{version}
+Requires: libarmnnBasePipeServer%{version_lib}%{?package_suffix} = %{version}
+Requires: libarmnnTestUtils%{version_lib_testutils}%{?package_suffix}
+Requires: libtimelineDecoder%{version_lib}%{?package_suffix} = %{version}
+Requires: libtimelineDecoderJson%{version_lib}%{?package_suffix} = %{version}
+# Make sure we do not install both openCL and non-openCL (CPU only) versions.
+%if "%{target}" == "opencl"
+Conflicts: armnn-devel
+%else
+Conflicts: armnn-opencl-devel
+%endif
+%if %{with armnn_flatbuffers}
+Requires: libarmnnSerializer%{version_lib}%{?package_suffix} = %{version}
+Requires: libarmnnTfLiteParser%{version_lib_tfliteparser}%{?package_suffix} = %{version}
+%endif
+%if %{with armnn_onnx}
+Requires: libarmnnOnnxParser%{version_lib_onnxparser}%{?package_suffix} = %{version}
+%endif
+
+%description devel
+Arm NN is an inference engine for CPUs, GPUs and NPUs.
+It bridges the gap between existing NN frameworks and the underlying IP.
+It enables efficient translation of existing neural network frameworks,
+such as TensorFlow Lite, allowing them to run efficiently – without
+modification – across Arm Cortex CPUs and Arm Mali GPUs.
+
+This package contains the development libraries and headers for armnn.
+
+%if %{with armnn_extra_tests}
+%package -n %{name}-extratests
+Summary: Additional downstream tests for Arm NN
+Group: Development/Libraries/C and C++
+Requires: %{name}
+# Make sure we do not install both openCL and non-openCL (CPU only) versions.
+%if "%{target}" == "opencl"
+Conflicts: armnn-extratests
+%else
+Conflicts: armnn-opencl-extratests
+%endif
+
+%description -n %{name}-extratests
+Arm NN is an inference engine for CPUs, GPUs and NPUs.
+It bridges the gap between existing NN frameworks and the underlying IP.
+It enables efficient translation of existing neural network frameworks,
+such as TensorFlow Lite, allowing them to run efficiently – without
+modification – across Arm Cortex CPUs and Arm Mali GPUs.
+
+This package contains additional downstream tests for armnn.
+%endif
+
+%package -n libarmnn%{version_lib}%{?package_suffix}
+Summary: libarmnn from armnn
+Group: Development/Libraries/C and C++
+%if "%{target}" == "opencl"
+Conflicts: libarmnn%{version_lib}
+%else
+Conflicts: libarmnn%{version_lib}-opencl
+%endif
+
+%description -n libarmnn%{version_lib}%{?package_suffix}
+Arm NN is an inference engine for CPUs, GPUs and NPUs.
+It bridges the gap between existing NN frameworks and the underlying IP.
+It enables efficient translation of existing neural network frameworks,
+such as TensorFlow Lite, allowing them to run efficiently – without
+modification – across Arm Cortex CPUs and Arm Mali GPUs.
+
+This package contains the libarmnn library from armnn.
+
+%package -n libarmnnBasePipeServer%{version_lib}%{?package_suffix}
+Summary: libarmnnBasePipeServer from armnn
+Group: Development/Libraries/C and C++
+%if "%{target}" == "opencl"
+Conflicts: libarmnnBasePipeServer%{version_lib}
+%else
+Conflicts: libarmnnBasePipeServer%{version_lib}-opencl
+%endif
+
+%description -n libarmnnBasePipeServer%{version_lib}%{?package_suffix}
+Arm NN is an inference engine for CPUs, GPUs and NPUs.
+It bridges the gap between existing NN frameworks and the underlying IP.
+It enables efficient translation of existing neural network frameworks,
+such as TensorFlow Lite, allowing them to run efficiently – without
+modification – across Arm Cortex CPUs and Arm Mali GPUs.
+
+This package contains the libarmnnBasePipeServer library from armnn.
+
+%package -n libarmnnTestUtils%{version_lib_testutils}%{?package_suffix}
+Summary: libarmnnTestUtils from armnn
+Group: Development/Libraries/C and C++
+%if "%{target}" == "opencl"
+Conflicts: libarmnnTestUtils%{version_lib_testutils}
+%else
+Conflicts: libarmnnTestUtils%{version_lib_testutils}-opencl
+%endif
+
+%description -n libarmnnTestUtils%{version_lib_testutils}%{?package_suffix}
+Arm NN is an inference engine for CPUs, GPUs and NPUs.
+It bridges the gap between existing NN frameworks and the underlying IP.
+It enables efficient translation of existing neural network frameworks,
+such as TensorFlow Lite, allowing them to run efficiently – without
+modification – across Arm Cortex CPUs and Arm Mali GPUs.
+
+This package contains the libarmnnTestUtils library from armnn.
+
+%package -n libtimelineDecoder%{version_lib}%{?package_suffix}
+Summary: libtimelineDecoder from armnn
+Group: Development/Libraries/C and C++
+%if "%{target}" == "opencl"
+Conflicts: libtimelineDecoder%{version_lib}
+%else
+Conflicts: libtimelineDecoder%{version_lib}-opencl
+%endif
+
+%description -n libtimelineDecoder%{version_lib}%{?package_suffix}
+Arm NN is an inference engine for CPUs, GPUs and NPUs.
+It bridges the gap between existing NN frameworks and the underlying IP.
+It enables efficient translation of existing neural network frameworks,
+such as TensorFlow Lite, allowing them to run efficiently – without
+modification – across Arm Cortex CPUs and Arm Mali GPUs.
+
+This package contains the libtimelineDecoder library from armnn.
+
+%package -n libtimelineDecoderJson%{version_lib}%{?package_suffix}
+Summary: libtimelineDecoderJson from armnn
+Group: Development/Libraries/C and C++
+%if "%{target}" == "opencl"
+Conflicts: libtimelineDecoderJson%{version_lib}
+%else
+Conflicts: libtimelineDecoderJson%{version_lib}-opencl
+%endif
+
+%description -n libtimelineDecoderJson%{version_lib}%{?package_suffix}
+Arm NN is an inference engine for CPUs, GPUs and NPUs.
+It bridges the gap between existing NN frameworks and the underlying IP.
+It enables efficient translation of existing neural network frameworks,
+such as TensorFlow Lite, allowing them to run efficiently – without
+modification – across Arm Cortex CPUs and Arm Mali GPUs.
+
+This package contains the libtimelineDecoderJson library from armnn.
+
+%if %{with armnn_flatbuffers}
+%package -n libarmnnSerializer%{version_lib}%{?package_suffix}
+Summary: libarmnnSerializer from armnn
+Group: Development/Libraries/C and C++
+%if "%{target}" == "opencl"
+Conflicts: libarmnnSerializer%{version_lib}
+%else
+Conflicts: libarmnnSerializer%{version_lib}-opencl
+%endif
+
+%description -n libarmnnSerializer%{version_lib}%{?package_suffix}
+Arm NN is an inference engine for CPUs, GPUs and NPUs.
+It bridges the gap between existing NN frameworks and the underlying IP.
+It enables efficient translation of existing neural network frameworks,
+such as TensorFlow Lite, allowing them to run efficiently – without
+modification – across Arm Cortex CPUs and Arm Mali GPUs.
+
+This package contains the libarmnnSerializer library from armnn.
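+
+# The parser subpackages below follow the same conflict pattern as the
+# libraries above: the opencl flavor of each library conflicts with its
+# plain (CPU only) counterpart, so only one flavor can be installed.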
+
+%package -n libarmnnTfLiteParser%{version_lib_tfliteparser}%{?package_suffix}
+Summary: libarmnnTfLiteParser from armnn
+Group: Development/Libraries/C and C++
+%if "%{target}" == "opencl"
+Conflicts: libarmnnTfLiteParser%{version_lib_tfliteparser}
+%else
+Conflicts: libarmnnTfLiteParser%{version_lib_tfliteparser}-opencl
+%endif
+
+%description -n libarmnnTfLiteParser%{version_lib_tfliteparser}%{?package_suffix}
+Arm NN is an inference engine for CPUs, GPUs and NPUs.
+It bridges the gap between existing NN frameworks and the underlying IP.
+It enables efficient translation of existing neural network frameworks,
+such as TensorFlow Lite, allowing them to run efficiently – without
+modification – across Arm Cortex CPUs and Arm Mali GPUs.
+
+This package contains the libarmnnTfLiteParser library from armnn.
+%endif
+
+%if %{with armnn_onnx}
+%package -n libarmnnOnnxParser%{version_lib_onnxparser}%{?package_suffix}
+Summary: libarmnnOnnxParser from armnn
+Group: Development/Libraries/C and C++
+%if "%{target}" == "opencl"
+Conflicts: libarmnnOnnxParser%{version_lib_onnxparser}
+%else
+Conflicts: libarmnnOnnxParser%{version_lib_onnxparser}-opencl
+%endif
+
+%description -n libarmnnOnnxParser%{version_lib_onnxparser}%{?package_suffix}
+Arm NN is an inference engine for CPUs, GPUs and NPUs.
+It bridges the gap between existing NN frameworks and the underlying IP.
+It enables efficient translation of existing neural network frameworks,
+such as TensorFlow Lite, allowing them to run efficiently – without
+modification – across Arm Cortex CPUs and Arm Mali GPUs.
+
+This package contains the libarmnnOnnxParser library from armnn.
+%endif
+
+%prep
+%setup -q -n armnn-%{version}
+%patch -P 1 -p1
+%if %{with armnn_extra_tests}
+%patch -P 200 -p1
+%patch -P 201 -p1
+%patch -P 202 -p1
+%patch -P 203 -p1
+%patch -P 204 -p1
+%patch -P 205 -p1
+# Add Boost log, as the downstream extra tests require it
+sed -i 's/ find_package(Boost 1.59 REQUIRED COMPONENTS unit_test_framework)/find_package(Boost 1.59 REQUIRED COMPONENTS unit_test_framework filesystem system log program_options)/' ./cmake/GlobalConfig.cmake
+%endif
+
+%build
+%if %{with armnn_onnx}
+mkdir onnx_deps
+PROTO=$(find %{_libdir} -name onnx.proto)
+protoc $PROTO --proto_path=. 
--proto_path=%{_includedir} --proto_path=$(dirname $(find %{_libdir} -name onnx)) --cpp_out=./onnx_deps
+%endif
+%cmake \
+ -DCMAKE_SKIP_RPATH=True \
+ -DSHARED_BOOST=1 \
+ -DCMAKE_CXX_FLAGS:STRING="%{optflags} -pthread -Wno-error=unused-result" \
+ -DBOOST_LIBRARYDIR=%{_libdir} \
+%if %{with armnn_onnx}
+ -DBUILD_ONNX_PARSER=ON \
+ -DONNX_GENERATED_SOURCES=../onnx_deps/ \
+%else
+ -DBUILD_ONNX_PARSER=OFF \
+%endif
+%if %{with armnn_flatbuffers}
+ -DBUILD_ARMNN_SERIALIZER=ON \
+ -DFLATC_DIR=%{_bindir} \
+ -DFLATBUFFERS_INCLUDE_PATH=%{_includedir} \
+ -DBUILD_TF_LITE_PARSER=ON \
+ -DTfLite_Schema_INCLUDE_PATH=%{_includedir}/tensorflow/lite/schema/ \
+ -DTF_LITE_SCHEMA_INCLUDE_PATH=%{_includedir}/tensorflow/lite/schema/ \
+%else
+ -DBUILD_ARMNN_SERIALIZER=OFF \
+ -DBUILD_TF_LITE_PARSER=OFF \
+%endif
+%if %{with compute_neon} || %{with compute_cl}
+ -DARMCOMPUTE_INCLUDE=%{_includedir} \
+ -DHALF_INCLUDE=%{_includedir}/half \
+ -DARMCOMPUTE_BUILD_DIR=%{_libdir} \
+ -DARMCOMPUTE_ROOT=%{_prefix} \
+%endif
+%if %{with compute_neon}
+ -DARMCOMPUTENEON=ON \
+%else
+ -DARMCOMPUTENEON=OFF \
+%endif
+%if %{with compute_cl}
+ -DARMCOMPUTECL=ON \
+ -DOPENCL_INCLUDE=%{_includedir} \
+%else
+ -DARMCOMPUTECL=OFF \
+%endif
+ -DTHIRD_PARTY_INCLUDE_DIRS=%{_includedir} \
+%if %{with armnn_flatbuffers}
+ -DBUILD_SAMPLE_APP=ON \
+%else
+ -DBUILD_SAMPLE_APP=OFF \
+%endif
+%if %{with armnn_tests}
+ -DBUILD_UNIT_TESTS=ON \
+ -DBUILD_TESTS=ON \
+%else
+ -DBUILD_UNIT_TESTS=OFF \
+ -DBUILD_TESTS=OFF \
+%endif
+%if %{with PyArmnn}
+ -DBUILD_PYTHON_WHL=ON \
+ -DBUILD_PYTHON_SRC=ON \
+%else
+ -DBUILD_PYTHON_WHL=OFF \
+ -DBUILD_PYTHON_SRC=OFF \
+%endif
+%if %{with armnn_extra_tests}
+ -DBUILD_ARMNN_EXAMPLES=ON
+%else
+ -DBUILD_ARMNN_EXAMPLES=OFF
+%endif
+
+%if 0%{?suse_version} > 1500
+%cmake_build
+%else
+%make_jobs
+%endif
+%if %{with armnn_tests}
+pushd tests/
+%if 0%{?suse_version} > 1500
+%cmake_build
+%else
+%make_jobs
+%endif
+popd
+%endif
+
+%install
+%cmake_install
+%if %{with armnn_tests}
+# Install tests manually
+install -d %{buildroot}%{_bindir}
+# Set CP_ARGS on its own line: in a one-line 'CP_ARGS=... find ... $CP_ARGS'
+# invocation the shell would expand $CP_ARGS before the assignment takes
+# effect, so cp would silently run without these options.
+CP_ARGS="-Prf --preserve=mode,timestamps --no-preserve=ownership"
+find ./build/tests -maxdepth 1 -type f -executable -exec cp $CP_ARGS {} %{buildroot}%{_bindir} \;
+%endif
+%if %{with armnn_flatbuffers}
+# Install Sample app
+cp $CP_ARGS ./build/samples/SimpleSample %{buildroot}%{_bindir}
+%endif
+# Drop static libs - https://github.com/ARM-software/armnn/issues/514
+rm -f %{buildroot}%{_libdir}/*.a
+
+# openCL UnitTests are failing in OBS due to the lack of an openCL device
+%if %{without compute_cl} && %{with armnn_tests}
+%check
+# Run tests
+LD_LIBRARY_PATH="$(pwd)/build/" \
+./build/UnitTests $UnitTestFlags
+%endif
+
+%post -n libarmnn%{version_lib}%{?package_suffix} -p /sbin/ldconfig
+%postun -n libarmnn%{version_lib}%{?package_suffix} -p /sbin/ldconfig
+
+%post -n libarmnnBasePipeServer%{version_lib}%{?package_suffix} -p /sbin/ldconfig
+%postun -n libarmnnBasePipeServer%{version_lib}%{?package_suffix} -p /sbin/ldconfig
+
+%post -n libarmnnTestUtils%{version_lib_testutils}%{?package_suffix} -p /sbin/ldconfig
+%postun -n libarmnnTestUtils%{version_lib_testutils}%{?package_suffix} -p /sbin/ldconfig
+
+%post -n libtimelineDecoderJson%{version_lib}%{?package_suffix} -p /sbin/ldconfig
+%postun -n libtimelineDecoderJson%{version_lib}%{?package_suffix} -p /sbin/ldconfig
+
+%post -n libtimelineDecoder%{version_lib}%{?package_suffix} -p /sbin/ldconfig
+%postun -n libtimelineDecoder%{version_lib}%{?package_suffix} -p /sbin/ldconfig
+
+%if %{with armnn_flatbuffers}
+%post 
-n libarmnnSerializer%{version_lib}%{?package_suffix} -p /sbin/ldconfig +%postun -n libarmnnSerializer%{version_lib}%{?package_suffix} -p /sbin/ldconfig + +%post -n libarmnnTfLiteParser%{version_lib_tfliteparser}%{?package_suffix} -p /sbin/ldconfig +%postun -n libarmnnTfLiteParser%{version_lib_tfliteparser}%{?package_suffix} -p /sbin/ldconfig +%endif + +%if %{with armnn_onnx} +%post -n libarmnnOnnxParser%{version_lib_onnxparser}%{?package_suffix} -p /sbin/ldconfig +%postun -n libarmnnOnnxParser%{version_lib_onnxparser}%{?package_suffix} -p /sbin/ldconfig +%endif + +%files +%defattr(-,root,root) +%doc README.md +%license LICENSE +%if %{with armnn_tests} +%{_bindir}/ExecuteNetwork +%if %{with armnn_flatbuffers} +%{_bindir}/ArmnnConverter +%{_bindir}/TfLite*-Armnn +%endif +%if %{with armnn_onnx} +%{_bindir}/Onnx*-Armnn +%endif +%if %{with armnn_flatbuffers} +%{_bindir}/SimpleSample +%endif +%endif + +%if %{with armnn_extra_tests} +%files -n %{name}-extratests +%{_bindir}/ArmnnExamples +%endif + +%files -n libarmnn%{version_lib}%{?package_suffix} +%{_libdir}/libarmnn.so.* + +%files -n libarmnnBasePipeServer%{version_lib}%{?package_suffix} +%{_libdir}/libarmnnBasePipeServer.so.* + +%files -n libarmnnTestUtils%{version_lib_testutils}%{?package_suffix} +%{_libdir}/libarmnnTestUtils.so.* + +%files -n libtimelineDecoder%{version_lib}%{?package_suffix} +%{_libdir}/libtimelineDecoder.so.* + +%files -n libtimelineDecoderJson%{version_lib}%{?package_suffix} +%{_libdir}/libtimelineDecoderJson.so.* + +%if %{with armnn_flatbuffers} +%files -n libarmnnSerializer%{version_lib}%{?package_suffix} +%{_libdir}/libarmnnSerializer.so.* + +%files -n libarmnnTfLiteParser%{version_lib_tfliteparser}%{?package_suffix} +%{_libdir}/libarmnnTfLiteParser.so.* +%endif + +%if %{with armnn_onnx} +%files -n libarmnnOnnxParser%{version_lib_onnxparser}%{?package_suffix} +%{_libdir}/libarmnnOnnxParser.so.* +%endif + +%files devel +%defattr(-,root,root) +%dir %{_includedir}/armnn/ +%{_includedir}/armnn/*.hpp +%dir %{_includedir}/armnn/backends +%{_includedir}/armnn/backends/CMakeLists.txt +%{_includedir}/armnn/backends/*.hpp +%dir %{_includedir}/armnn/profiling +%{_includedir}/armnn/profiling/*.hpp +%dir %{_includedir}/armnn/profiling/client/ +%dir %{_includedir}/armnn/profiling/client/include/ +%{_includedir}/armnn/profiling/client/include/*.hpp +%dir %{_includedir}/armnn/profiling/client/include/backends/ +%{_includedir}/armnn/profiling/client/include/backends/*.hpp +%dir %{_includedir}/armnn/profiling/common/ +%dir %{_includedir}/armnn/profiling/common/include/ +%{_includedir}/armnn/profiling/common/include/*.hpp +%dir %{_includedir}/armnn/utility +%{_includedir}/armnn/utility/*.hpp +%dir %{_includedir}/armnnUtils +%{_includedir}/armnnUtils/*.hpp +%dir %{_includedir}/armnnOnnxParser/ +%{_includedir}/armnnOnnxParser/*.hpp +%dir %{_includedir}/armnnTfLiteParser/ +%{_includedir}/armnnTfLiteParser/*.hpp +%dir %{_includedir}/armnnDeserializer/ +%{_includedir}/armnnDeserializer/IDeserializer.hpp +%dir %{_includedir}/armnnSerializer/ +%{_includedir}/armnnSerializer/ISerializer.hpp +%dir %{_includedir}/armnnTestUtils/ +%{_includedir}/armnnTestUtils/*.hpp +%dir %{_libdir}/cmake/armnn +%{_libdir}/cmake/armnn/* +%{_libdir}/libarmnn.so +%{_libdir}/libarmnnBasePipeServer.so +%{_libdir}/libtimelineDecoder.so +%{_libdir}/libtimelineDecoderJson.so +%if %{with armnn_flatbuffers} +%{_libdir}/libarmnnSerializer.so +%{_libdir}/libarmnnTfLiteParser.so +%endif +%{_libdir}/libarmnnTestUtils.so +%if %{with armnn_onnx} 
+%{_libdir}/libarmnnOnnxParser.so +%endif + +%changelog