Compare commits

50 Commits

Author SHA256 Message Date
672072c121 Accepting request 1295108 from science:machinelearning
OBS-URL: https://build.opensuse.org/request/show/1295108
OBS-URL: https://build.opensuse.org/package/show/openSUSE:Factory/armnn?expand=0&rev=15
2025-07-23 14:33:45 +00:00
caabb091ab Accepting request 1269118 from science:machinelearning
- Update to 25.02:
  * Changelog: https://github.com/ARM-software/armnn/releases/tag/v25.02
- Disable TF-Lite support as it is going away from Tumbleweed

OBS-URL: https://build.opensuse.org/request/show/1269118
OBS-URL: https://build.opensuse.org/package/show/openSUSE:Factory/armnn?expand=0&rev=14
2025-04-14 10:59:07 +00:00
514faa65e4 Fix BR for opencl flavor
OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=98
2025-04-14 06:34:18 +00:00
de3a1e3c13 OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=97 2025-04-10 15:39:36 +00:00
d08bf2e45e - Update to 25.02:
  * Changelog: https://github.com/ARM-software/armnn/releases/tag/v25.02
- Disable TF-Lite support as it is going away from Tumbleweed

OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=96
2025-04-10 12:34:49 +00:00
c147cab9f8 Accepting request 1227952 from science:machinelearning
OBS-URL: https://build.opensuse.org/request/show/1227952
OBS-URL: https://build.opensuse.org/package/show/openSUSE:Factory/armnn?expand=0&rev=13
2024-12-03 19:47:11 +00:00
5c4eb28cf2 - Remove downstream patches:
  * 0003-add-more-test-command-line-arguments.patch
  * 0005-add-armnn-mobilenet-test-example.patch
  * 0006-armnn-mobilenet-test-example.patch
  * 0009-command-line-options-for-video-port-selection.patch
  * 0010-armnnexamples-update-for-19.08-modifications.patch
  * armnn-fix_find_opencv.patch
- Update to 24.11:
  * Changelog: https://github.com/ARM-software/armnn/releases/tag/v24.11
- Drop upstream patch:
  * armnn-fix-armv7.patch

OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=94
2024-12-03 09:43:08 +00:00
7b5926092b Accepting request 1199228 from science:machinelearning
OBS-URL: https://build.opensuse.org/request/show/1199228
OBS-URL: https://build.opensuse.org/package/show/openSUSE:Factory/armnn?expand=0&rev=12
2024-09-06 15:19:06 +00:00
a54685d0bf - Add patch to fix build on armv7:
* armnn-fix-armv7.patch

OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=92
2024-09-06 13:30:37 +00:00
1b7a94b61f Accepting request 1198458 from science:machinelearning
OBS-URL: https://build.opensuse.org/request/show/1198458
OBS-URL: https://build.opensuse.org/package/show/openSUSE:Factory/armnn?expand=0&rev=11
2024-09-04 11:23:09 +00:00
ac1080cffc - Update to 24.08:
* Changelog: https://github.com/ARM-software/armnn/releases/tag/v24.08

OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=90
2024-09-03 11:35:47 +00:00
68683db196 Accepting request 1181440 from science:machinelearning
OBS-URL: https://build.opensuse.org/request/show/1181440
OBS-URL: https://build.opensuse.org/package/show/openSUSE:Factory/armnn?expand=0&rev=10
2024-06-18 20:51:55 +00:00
0c669f4ffb - Update to 24.05:
* Changelog: https://github.com/ARM-software/armnn/releases/tag/v24.05

OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=88
2024-06-18 08:58:19 +00:00
132956bed7 Accepting request 1164355 from science:machinelearning
OBS-URL: https://build.opensuse.org/request/show/1164355
OBS-URL: https://build.opensuse.org/package/show/openSUSE:Factory/armnn?expand=0&rev=9
2024-04-03 15:21:27 +00:00
41fe83f907 Accepting request 1164354 from home:Guillaume_G:branches:science:machinelearning
- Update to 24.02:
  * Changelog: https://github.com/ARM-software/armnn/releases/tag/v24.02

OBS-URL: https://build.opensuse.org/request/show/1164354
OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=86
2024-04-03 12:20:34 +00:00
373d712089 Accepting request 1150746 from science:machinelearning
OBS-URL: https://build.opensuse.org/request/show/1150746
OBS-URL: https://build.opensuse.org/package/show/openSUSE:Factory/armnn?expand=0&rev=8
2024-02-26 18:46:04 +00:00
7360c5b40a Accepting request 1147751 from home:dimstar:rpm4.20:a
Prepare for RPM 4.20

OBS-URL: https://build.opensuse.org/request/show/1147751
OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=84
2024-02-26 09:09:32 +00:00
a2124b0f51 Accepting request 1132049 from science:machinelearning
- Update to 23.11:
  * Changelog: https://github.com/ARM-software/armnn/releases/tag/v23.11

OBS-URL: https://build.opensuse.org/request/show/1132049
OBS-URL: https://build.opensuse.org/package/show/openSUSE:Factory/armnn?expand=0&rev=7
2023-12-08 21:33:14 +00:00
81b910c1d2 OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=82 2023-12-08 08:34:35 +00:00
5ac6b5d1a8 Accepting request 1131646 from home:Guillaume_G:branches:science:machinelearning
- Update to 23.11:
  * Changelog: https://github.com/ARM-software/armnn/releases/tag/v23.11

OBS-URL: https://build.opensuse.org/request/show/1131646
OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=81
2023-12-07 15:52:19 +00:00
821b6c0cc5 Accepting request 1109209 from science:machinelearning
OBS-URL: https://build.opensuse.org/request/show/1109209
OBS-URL: https://build.opensuse.org/package/show/openSUSE:Factory/armnn?expand=0&rev=6
2023-09-06 16:59:35 +00:00
7b1e73c879 Accepting request 1109207 from home:Guillaume_G:branches:science:machinelearning
- Update to 23.08:
  * Changelog: https://github.com/ARM-software/armnn/releases/tag/v23.08

OBS-URL: https://build.opensuse.org/request/show/1109207
OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=79
2023-09-06 07:24:43 +00:00
aac765d7dd Accepting request 1092624 from science:machinelearning
- Update to 23.05:
  * Changelog: https://github.com/ARM-software/armnn/releases/tag/v23.05
- Drop upstream patches:
  * armnn-fix-gcc13.patch
  * armnn-fix-gcc13-2.patch
  * 4cf40d7.diff

OBS-URL: https://build.opensuse.org/request/show/1092624
OBS-URL: https://build.opensuse.org/package/show/openSUSE:Factory/armnn?expand=0&rev=5
2023-06-13 14:09:19 +00:00
a118d831ed OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=77 2023-06-12 15:41:55 +00:00
1a3c1990bd OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=76 2023-06-12 15:41:50 +00:00
39ebb1448c OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=75 2023-06-12 15:41:45 +00:00
30eba04c05 Accepting request 1092615 from home:Guillaume_G:branches:science:machinelearning
- Update to 23.05:
  * Changelog: https://github.com/ARM-software/armnn/releases/tag/v23.05
- Drop upstream patches:
  * armnn-fix-gcc13.patch
  * armnn-fix-gcc13-2.patch
  * 4cf40d7.diff

OBS-URL: https://build.opensuse.org/request/show/1092615
OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=74
2023-06-12 15:26:46 +00:00
5f10b81bc3 Accepting request 1080889 from science:machinelearning
- Add additional gcc13 fixes:
  * 4cf40d7.diff
  * armnn-fix-gcc13-2.patch
- Update armnn-fix-gcc13.patch with upstream patch
- Add patch to fix build with GCC13:
  * armnn-fix-gcc13.patch

OBS-URL: https://build.opensuse.org/request/show/1080889
OBS-URL: https://build.opensuse.org/package/show/openSUSE:Factory/armnn?expand=0&rev=4
2023-04-20 14:49:00 +00:00
fd0141bad3 Accepting request 1080887 from home:Guillaume_G:branches:science:machinelearning
- Add additional gcc13 fixes:
  * 4cf40d7.diff
  * armnn-fix-gcc13-2.patch

OBS-URL: https://build.opensuse.org/request/show/1080887
OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=72
2023-04-20 13:15:14 +00:00
0b06e7426e Accepting request 1077421 from home:Guillaume_G:branches:science:machinelearning
- Update armnn-fix-gcc13.patch with upstream patch

OBS-URL: https://build.opensuse.org/request/show/1077421
OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=71
2023-04-05 06:30:22 +00:00
7e7e73f680 Accepting request 1073702 from home:Guillaume_G:branches:science:machinelearning
- Add patch to fix build with GCC13:
  * armnn-fix-gcc13.patch

OBS-URL: https://build.opensuse.org/request/show/1073702
OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=70
2023-03-22 08:09:38 +00:00
c44d7244aa Accepting request 1072066 from science:machinelearning
OBS-URL: https://build.opensuse.org/request/show/1072066
OBS-URL: https://build.opensuse.org/package/show/openSUSE:Factory/armnn?expand=0&rev=3
2023-03-15 17:56:16 +00:00
ee828868a2 Accepting request 1072065 from home:Guillaume_G:branches:science:machinelearning
- Update to 23.02:
  * Changelog: https://github.com/ARM-software/armnn/releases/tag/v23.02
- Drop upstream patches:
  * armnn-gh711.patch
  * armnn-281e97b.patch

OBS-URL: https://build.opensuse.org/request/show/1072065
OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=68
2023-03-15 09:52:09 +00:00
e017979d44 Accepting request 1042947 from science:machinelearning
OBS-URL: https://build.opensuse.org/request/show/1042947
OBS-URL: https://build.opensuse.org/package/show/openSUSE:Factory/armnn?expand=0&rev=2
2022-12-15 18:25:04 +00:00
e00b6ac8ff Accepting request 1042946 from home:Guillaume_G:branches:science:machinelearning
- tensorflow-lite >= 2.10 is only available on Tumbleweed
- Add patch to use static libraries not object libraries for
  support library:
  * armnn-281e97b.patch

OBS-URL: https://build.opensuse.org/request/show/1042946
OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=66
2022-12-14 15:42:02 +00:00
57b146d70b Accepting request 1039050 from science:machinelearning
Add armnn back to Factory

OBS-URL: https://build.opensuse.org/request/show/1039050
OBS-URL: https://build.opensuse.org/package/show/openSUSE:Factory/armnn?expand=0&rev=1
2022-11-30 14:00:12 +00:00
fe92d870c9 Accepting request 1039048 from home:Guillaume_G:branches:science:machinelearning
- Update to 22.11:
  * Changelog: https://github.com/ARM-software/armnn/releases/tag/v22.11
  * Add libarmnnTestUtils
- Add patch to fix build:
  * armnn-gh711.patch
- Update to 22.08:
  * Changelog: https://github.com/ARM-software/armnn/releases/tag/v22.08
- Drop upstream patch:
  * armnn-fix-gcc12.patch
- Update to 22.05:
  * Changelog: https://github.com/ARM-software/armnn/releases/tag/v22.05
- Add patch to fix build with GCC12 and new flatbuffers:
  * armnn-fix-gcc12.patch

OBS-URL: https://build.opensuse.org/request/show/1039048
OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=64
2022-11-30 07:24:10 +00:00
c559658e25 OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=63 2022-10-14 12:37:56 +00:00
8a6803a6fe Accepting request 750961 from home:Guillaume_G:branches:science:machinelearning
- tensorflow-devel package now includes *.pb.* files, so 
  remove TensorFlow parser build workaround:
  * armnn-fix_tensorflow_link.patch

OBS-URL: https://build.opensuse.org/request/show/750961
OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=12
2019-11-26 14:25:49 +00:00
4487cb0d74 Accepting request 743531 from home:Guillaume_G:branches:science:machinelearning
- Add a conflict between armnn-extratests and armnn-opencl-extratests

OBS-URL: https://build.opensuse.org/request/show/743531
OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=11
2019-10-28 15:39:41 +00:00
6ea3b628c3 Accepting request 743495 from home:Guillaume_G:branches:science:machinelearning
- Add a conflict between armnn-devel and armnn-opencl-devel

OBS-URL: https://build.opensuse.org/request/show/743495
OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=10
2019-10-28 13:08:08 +00:00
fb9078d4c9 Accepting request 743457 from home:Guillaume_G:branches:science:machinelearning
- Enable ONNX for Tumbleweed

- Add downstream ArmnnExamples in a separate '-extratests' package
  with patches:
  * 0003-add-more-test-command-line-arguments.patch
  * 0005-add-armnn-mobilenet-test-example.patch
  * 0006-armnn-mobilenet-test-example.patch
  * 0007-enable-use-of-arm-compute-shared-library.patch
  * 0009-command-line-options-for-video-port-selection.patch
  * 0010-armnnexamples-update-for-19.08-modifications.patch
- Fix build when extratests are disabled
  * armnn-fix_find_opencv.patch

OBS-URL: https://build.opensuse.org/request/show/743457
OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=9
2019-10-28 10:54:22 +00:00
3beeef13a8 Accepting request 736849 from home:Guillaume_G:branches:home:mslacken:ml
- Replace patch with an upstreamable version:
  * armnn-generate-versioned-library.patch
- Add ONNX Parser support (disabled by default as there is no 
  official ONNX package yet)

OBS-URL: https://build.opensuse.org/request/show/736849
OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=8
2019-10-10 12:22:47 +00:00
98cd3ecc18 Accepting request 734348 from home:Guillaume_G:branches:science:machinelearning
- Re-enable TensorFlow Parser for TW
- Add openCL flavor
- Fix armv7

OBS-URL: https://build.opensuse.org/request/show/734348
OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=7
2019-10-01 14:35:20 +00:00
6670a974c5 Accepting request 728506 from home:Guillaume_G:branches:science:machinelearning
- Disable TensorFlow as on 15.1 only x86_64 succeeds and on TW we
  have an incompatibility with protobuf (3.8.0 in TW while
  TensorFlow uses 3.6.1 internally)

- Update to 19.08:
- Changelog: https://github.com/ARM-software/armnn/releases/tag/v19.08
- Remove upstreamed patch:
  * armnn-fix_quantizer_link.patch
  * armnn-fix_caffe_parser_with_new_protobuf.patch
- Refresh patch:
  * armnn-generate-versioned-library.patch 
- Drop patches not needed anymore:
  * armnn-remove_broken_std_move.patch
  * armnn-fix_build_with_gcc9.patch

- Disable LTO until lto link is fixed
  https://github.com/ARM-software/armnn/issues/251

- Fix build in Tumbleweed, with latest protobuf:
  * armnn-fix_caffe_parser_with_new_protobuf.patch 

- Enable Tensorflow parser
- Fix link with Tensorflow:
  * armnn-fix_tensorflow_link.patch

OBS-URL: https://build.opensuse.org/request/show/728506
OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=6
2019-09-05 13:03:11 +00:00
df63b9f0ad OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=5 2019-06-06 08:21:58 +00:00
f0ef77ff1d Accepting request 707891 from home:Guillaume_G:branches:science:machinelearning
- Build and package libarmnnTfLiteParser
- Fix libarmnnQuantizer build with:
  * armnn-fix_quantizer_link.patch
- Add _constraints to avoid OOM errors
- Update to 19.05:
- Changelog: https://github.com/ARM-software/armnn/releases/tag/v19.05
- Remove upstreamed patch:
  * armnn-fix_stb_include.patch
- Rebase patch:
  * armnn-generate-versioned-library.patch
- Update patch:
  * armnn-remove_broken_std_move.patch
- Fix build on Tumbleweed with:
  * armnn-fix_build_with_gcc9.patch
- Fix build on Tumbleweed with:
  * armnn-remove_broken_std_move.patch

OBS-URL: https://build.opensuse.org/request/show/707891
OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=4
2019-06-05 16:12:07 +00:00
2a68779842 Accepting request 706234 from home:Guillaume_G:branches:science:machinelearning
- Call ldconfig in post/postun for libarmnnCaffeParser

OBS-URL: https://build.opensuse.org/request/show/706234
OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=3
2019-05-29 08:13:46 +00:00
d337a714b1 Accepting request 705659 from home:Guillaume_G:branches:science:machinelearning
- Enable and fix Caffe parser
- Use %cmake_build macro

OBS-URL: https://build.opensuse.org/request/show/705659
OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=2
2019-05-28 14:53:17 +00:00
331194d69e Accepting request 695957 from home:Guillaume_G:branches:science:machinelearning
- Add compute_cl option, disabled by default since the check fails as
  no OpenCL is enabled in OBS
- Split libarmnn and libarmnnSerializer into separate packages
- Add patch to have versioned libs:
  * armnn-generate-versioned-library.patch
- Package versioned libs
- Enable NEON backend on AArch64
- Add patch to enable use of shared lib for ComputeLibrary:
  * 0007-enable-use-of-arm-compute-shared-library.patch
- Update to 19.02
- Remove upstreamed patch:
  * armnn-fix_catching_polymorphic_type.patch
- Update to 18.11
- Add patch to fix build:
  * armnn-fix_catching_polymorphic_type.patch
  * armnn-fix_boost.patch
  * armnn-fix_stb_include.patch
- Initial version 18.08

OBS-URL: https://build.opensuse.org/request/show/695957
OBS-URL: https://build.opensuse.org/package/show/science:machinelearning/armnn?expand=0&rev=1
2019-04-23 13:38:58 +00:00
10 changed files with 0 additions and 961 deletions


@@ -1,76 +0,0 @@
From 964cb82f3b811aec6663255ab0aa589f0a3be0ee Mon Sep 17 00:00:00 2001
From: Qin Su <qsu@ti.com>
Date: Fri, 22 Feb 2019 14:10:07 -0500
Subject: [PATCH] add more test command line arguments
Updated by Guillaume_G to apply properly (s/BOOST_ASSERT/ARMNN_ASSERT/)
Upstream-Status: Inappropriate [TI only test code]
Signed-off-by: Qin Su <qsu@ti.com>
---
tests/InferenceTest.inl | 49 +++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 49 insertions(+)
diff --git a/tests/InferenceTest.inl b/tests/InferenceTest.inl
index 538720b..6fd21b8 100644
--- a/tests/InferenceTest.inl
+++ b/tests/InferenceTest.inl
@@ -326,6 +326,55 @@ int ClassifierInferenceTestMain(int argc,
ARMNN_ASSERT(modelFilename);
ARMNN_ASSERT(inputBindingName);
ARMNN_ASSERT(outputBindingName);
+ int count;
+ const char *p_input;
+ char inmodelname[500];
+ char outtensorname[500];
+
+ /* parse command line */
+ for (count = 1; count < argc; count++)
+ {
+ if (*(argv[count]) == '+')
+ {
+ p_input = argv[count] + 1;
+ switch (*(p_input))
+ {
+ case 'i':
+ case 'I':
+ strcpy(inmodelname, p_input + 2);
+ modelFilename = &inmodelname[0];
+ std::cout << "Input model = " << modelFilename << std::endl;
+ break;
+ case 'o':
+ case 'O':
+ strcpy(outtensorname, p_input + 2);
+ outputBindingName = &outtensorname[0];
+ std::cout << "out tensor name = " << outputBindingName << std::endl;
+ break;
+ default:
+ break;
+ }
+ }
+ else if (*(argv[count]) == '-')
+ {
+ p_input = argv[count] + 1;
+ switch (*(p_input))
+ {
+ case '-':
+ p_input = argv[count] + 2;
+ case 'h':
+ case 'H':
+ std::cout <<"\nAdditional Options: " << std::endl;
+ std::cout <<" +i Set user specified inference model name." << std::endl;
+ std::cout <<" If not set, default name is used." << std::endl;
+ std::cout <<" +o Set user specified output tensor name." << std::endl;
+ std::cout <<" If not set, default name is used.\n" << std::endl;
+ break;
+ default:
+ break;
+ }
+ }
+ }
return InferenceTestMain(argc, argv, defaultTestCaseIds,
[=]
--
1.9.1
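
A minimal sketch of the same "+i"/"+o" override parsing added above, using std::string instead of strcpy() into fixed 500-byte buffers; the struct and helper names here are hypothetical and not part of the patch:

#include <iostream>
#include <string>

// Overrides collected from "+i<sep>value" / "+o<sep>value" tokens, as in the hunk above.
struct TestOverrides
{
    std::string modelFilename;      // set by "+i"
    std::string outputBindingName;  // set by "+o"
};

TestOverrides ParseOverrides(int argc, const char* argv[])
{
    TestOverrides overrides;
    for (int i = 1; i < argc; ++i)
    {
        const std::string arg = argv[i];
        if (arg.size() > 3 && arg[0] == '+')
        {
            // Skip '+', the option letter and one separator character,
            // mirroring the "p_input + 2" offset used in the patch.
            const std::string value = arg.substr(3);
            if (arg[1] == 'i' || arg[1] == 'I')
            {
                overrides.modelFilename = value;
                std::cout << "Input model = " << overrides.modelFilename << std::endl;
            }
            else if (arg[1] == 'o' || arg[1] == 'O')
            {
                overrides.outputBindingName = value;
                std::cout << "out tensor name = " << overrides.outputBindingName << std::endl;
            }
        }
    }
    return overrides;
}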


@@ -1,71 +0,0 @@
From 99a6c339f1828d3cd1b193cf702bada9011d900b Mon Sep 17 00:00:00 2001
From: Djordje Senicic <x0157990@ti.com>
Date: Mon, 24 Jun 2019 14:29:19 -0400
Subject: [PATCH] add armnn mobilenet test example
Upstream-Status: Inappropriate [TI only test code]
Signed-off-by: Qin Su <qsu@ti.com>
Signed-off-by: Djordje Senicic <x0157990@ti.com>
[Guillaume's update: Add boost_log dep]
[Guillaume's update: Update to apply on top of 20.08]
---
tests/CMakeLists.txt | 41 +++++++++++++++++++++++++++++++++++++++++
1 file changed, 41 insertions(+)
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index dfcf4b48..5a78d3a6 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -1,3 +1,6 @@
+find_package( OpenCV REQUIRED )
+include_directories( ${OpenCV_INCLUDE_DIRS} )
+
# UnitTests
include(CheckIncludeFiles)
@@ -348,3 +351,42 @@ if(BUILD_ARMNN_QUANTIZER)
target_include_directories(ImageCSVFileGenerator PRIVATE ../src/armnnUtils)
ImageTensorExecutor(ImageCSVFileGenerator)
endif()
+
+if (BUILD_ARMNN_EXAMPLES)
+ set(ArmnnExamples_sources
+ ArmnnExamples/ArmnnExamples.cpp)
+
+ add_executable_ex(ArmnnExamples ${ArmnnExamples_sources})
+
+ target_include_directories(ArmnnExamples PRIVATE ../src/armnnUtils)
+ target_include_directories(ArmnnExamples PRIVATE ../src/armnn)
+ target_include_directories(ArmnnExamples PRIVATE ../src/backends)
+
+ if (BUILD_CAFFE_PARSER)
+ target_link_libraries(ArmnnExamples armnnCaffeParser)
+ endif()
+ if (BUILD_TF_PARSER)
+ target_link_libraries(ArmnnExamples armnnTfParser)
+ endif()
+
+ if (BUILD_TF_LITE_PARSER)
+ target_link_libraries(ArmnnExamples armnnTfLiteParser)
+ endif()
+ if (BUILD_ONNX_PARSER)
+ target_link_libraries(ArmnnExamples armnnOnnxParser)
+ endif()
+
+ target_link_libraries(ArmnnExamples armnn)
+ target_link_libraries(ArmnnExamples ${CMAKE_THREAD_LIBS_INIT})
+ if(OPENCL_LIBRARIES)
+ target_link_libraries(ArmnnExamples ${OPENCL_LIBRARIES})
+ endif()
+
+ target_link_libraries(ArmnnExamples
+ ${Boost_LOG_LIBRARY}
+ ${Boost_SYSTEM_LIBRARY}
+ ${Boost_FILESYSTEM_LIBRARY}
+ ${Boost_PROGRAM_OPTIONS_LIBRARY}
+ ${OpenCV_LIBS})
+ addDllCopyCommands(ArmnnExamples)
+endif()
--
2.17.1


@@ -1,680 +0,0 @@
From 4d5e7db268a4f816e24449e8ad011e35890f0c7e Mon Sep 17 00:00:00 2001
From: Qin Su <qsu@ti.com>
Date: Fri, 22 Feb 2019 13:39:09 -0500
Subject: [PATCH] armnn mobilenet test example
Upstream-Status: Inappropriate [TI only test code]
Signed-off-by: Qin Su <qsu@ti.com>
[Guillaume's update: s#Logging.hpp#armnn/Logging.hpp#]
[Guillaume's update: Add #include <boost/log/trivial.hpp>]
[Guillaume's update: Drop armnnUtils::ConfigureLogging(...)]
[Guillaume's update: Handle boost::variant to mapbox::util::variant update]
---
tests/ArmnnExamples/ArmnnExamples.cpp | 654 ++++++++++++++++++++++++++++++++++
1 file changed, 654 insertions(+)
create mode 100644 tests/ArmnnExamples/ArmnnExamples.cpp
diff --git a/tests/ArmnnExamples/ArmnnExamples.cpp b/tests/ArmnnExamples/ArmnnExamples.cpp
new file mode 100644
index 0000000..53a11cc
--- /dev/null
+++ b/tests/ArmnnExamples/ArmnnExamples.cpp
@@ -0,0 +1,654 @@
+/******************************************************************************
+ * Copyright (c) 2018, Texas Instruments Incorporated - http://www.ti.com/
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Texas Instruments Incorporated nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************///
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#include <armnn/ArmNN.hpp>
+#include <boost/log/trivial.hpp>
+
+#include <utility>
+#include <armnn/TypesUtils.hpp>
+
+#if defined(ARMNN_CAFFE_PARSER)
+#include "armnnCaffeParser/ICaffeParser.hpp"
+#endif
+#if defined(ARMNN_TF_PARSER)
+#include "armnnTfParser/ITfParser.hpp"
+#endif
+#if defined(ARMNN_TF_LITE_PARSER)
+#include "armnnTfLiteParser/ITfLiteParser.hpp"
+#endif
+#if defined(ARMNN_ONNX_PARSER)
+#include "armnnOnnxParser/IOnnxParser.hpp"
+#endif
+#include <mapbox/variant.hpp> /*#include "CsvReader.hpp"*/
+#include "../InferenceTest.hpp"
+#include <armnn/Logging.hpp>
+#include <Profiling.hpp>
+
+#include <boost/algorithm/string/trim.hpp>
+#include <boost/algorithm/string/split.hpp>
+#include <boost/algorithm/string/classification.hpp>
+#include <boost/program_options.hpp>
+
+#include <iostream>
+#include <fstream>
+#include <functional>
+#include <future>
+#include <algorithm>
+#include <iterator>
+#include<vector>
+
+#include <signal.h>
+#include "opencv2/core.hpp"
+#include "opencv2/imgproc.hpp"
+#include "opencv2/highgui.hpp"
+#include "opencv2/videoio.hpp"
+#include <time.h>
+
+using namespace cv;
+
+#define INPUT_IMAGE 0
+#define INPUT_VIDEO 1
+#define INPUT_CAMERA 2
+
+Mat test_image;
+Rect rectCrop;
+
+time_point<high_resolution_clock> predictStart;
+time_point<high_resolution_clock> predictEnd;
+
+void imagenetCallBackFunc(int event, int x, int y, int flags, void* userdata)
+{
+ if ( event == EVENT_RBUTTONDOWN )
+ {
+ std::cout << "Right button of the mouse is clicked - position (" << x << ", " << y << ")" << " ... prepare to exit!" << std::endl;
+ exit(0);
+ }
+}
+
+inline float Lerpfloat(float a, float b, float w)
+{
+ return w * b + (1.f - w) * a;
+}
+
+// Load a single image
+struct ImageData
+{
+ unsigned int m_width;
+ unsigned int m_height;
+ unsigned int m_chnum;
+ unsigned int m_size;
+ std::vector<uint8_t> m_image;
+};
+// Load a single image
+std::unique_ptr<ImageData> loadImageData(std::string image_path, VideoCapture &cap, cv::Mat img, int input_type)
+{
+ //cv::Mat img;
+ if (input_type == INPUT_IMAGE)
+ {
+ /* use OpenCV to get the image */
+ img = cv::imread(image_path, CV_LOAD_IMAGE_COLOR);
+ }
+ cv::cvtColor(img, img, CV_BGR2RGB); //convert image format from BGR(openCV format) to RGB (armnn required format).
+
+ // store image and label in output Image
+ std::unique_ptr<ImageData> ret(new ImageData);
+ ret->m_width = static_cast<unsigned int>(img.cols);
+ ret->m_height = static_cast<unsigned int>(img.rows);
+ ret->m_chnum = static_cast<unsigned int>(img.channels());
+ ret->m_size = static_cast<unsigned int>(img.cols*img.rows*img.channels());
+ ret->m_image.resize(ret->m_size);
+
+ for (unsigned int i = 0; i < ret->m_size; i++)
+ {
+ ret->m_image[i] = static_cast<uint8_t>(img.data[i]);
+ }
+ return ret;
+}
+// to resize input tensor size
+std::vector<float> ResizeBilinear(std::vector<uint8_t> input,
+ const unsigned int inWidth,
+ const unsigned int inHeight,
+ const unsigned int inChnum,
+ const unsigned int outputWidth,
+ const unsigned int outputHeight)
+{
+ std::vector<float> out;
+ out.resize(outputWidth * outputHeight * 3);
+
+ // We follow the definition of TensorFlow and AndroidNN: the top-left corner of a texel in the output
+ // image is projected into the input image to figure out the interpolants and weights. Note that this
+ // will yield different results than if projecting the centre of output texels.
+
+ const unsigned int inputWidth = inWidth;
+ const unsigned int inputHeight = inHeight;
+
+ // How much to scale pixel coordinates in the output image to get the corresponding pixel coordinates
+ // in the input image.
+ const float scaleY = boost::numeric_cast<float>(inputHeight) / boost::numeric_cast<float>(outputHeight);
+ const float scaleX = boost::numeric_cast<float>(inputWidth) / boost::numeric_cast<float>(outputWidth);
+
+ uint8_t rgb_x0y0[3];
+ uint8_t rgb_x1y0[3];
+ uint8_t rgb_x0y1[3];
+ uint8_t rgb_x1y1[3];
+ unsigned int pixelOffset00, pixelOffset10, pixelOffset01, pixelOffset11;
+ for (unsigned int y = 0; y < outputHeight; ++y)
+ {
+ // Corresponding real-valued height coordinate in input image.
+ const float iy = boost::numeric_cast<float>(y) * scaleY;
+ // Discrete height coordinate of top-left texel (in the 2x2 texel area used for interpolation).
+ const float fiy = floorf(iy);
+ const unsigned int y0 = boost::numeric_cast<unsigned int>(fiy);
+
+ // Interpolation weight (range [0,1])
+ const float yw = iy - fiy;
+
+ for (unsigned int x = 0; x < outputWidth; ++x)
+ {
+ // Real-valued and discrete width coordinates in input image.
+ const float ix = boost::numeric_cast<float>(x) * scaleX;
+ const float fix = floorf(ix);
+ const unsigned int x0 = boost::numeric_cast<unsigned int>(fix);
+
+ // Interpolation weight (range [0,1]).
+ const float xw = ix - fix;
+
+ // Discrete width/height coordinates of texels below and to the right of (x0, y0).
+ const unsigned int x1 = std::min(x0 + 1, inputWidth - 1u);
+ const unsigned int y1 = std::min(y0 + 1, inputHeight - 1u);
+
+ pixelOffset00 = x0 * inChnum + y0 * inputWidth * inChnum;
+ pixelOffset10 = x1 * inChnum + y0 * inputWidth * inChnum;
+ pixelOffset01 = x0 * inChnum + y1 * inputWidth * inChnum;
+ pixelOffset11 = x1 * inChnum + y1 * inputWidth * inChnum;
+ for (unsigned int c = 0; c < 3; ++c)
+ {
+ rgb_x0y0[c] = input[pixelOffset00+c];
+ rgb_x1y0[c] = input[pixelOffset10+c];
+ rgb_x0y1[c] = input[pixelOffset01+c];
+ rgb_x1y1[c] = input[pixelOffset11+c];
+ }
+
+ for (unsigned c=0; c<3; ++c)
+ {
+ const float ly0 = Lerpfloat(float(rgb_x0y0[c]), float(rgb_x1y0[c]), xw);
+ const float ly1 = Lerpfloat(float(rgb_x0y1[c]), float(rgb_x1y1[c]), xw);
+ const float l = Lerpfloat(ly0, ly1, yw);
+ out[(3*((y*outputWidth)+x)) + c] = static_cast<float>(l)/255.0f;
+ }
+ }
+ }
+ return out;
+}
+
+namespace
+{
+
+ // Configure boost::program_options for command-line parsing and validation.
+ namespace po = boost::program_options;
+
+ template<typename T, typename TParseElementFunc>
+ std::vector<T> ParseArrayImpl(std::istream& stream, TParseElementFunc parseElementFunc)
+ {
+ std::vector<T> result;
+ // Processes line-by-line.
+ std::string line;
+ while (std::getline(stream, line))
+ {
+ std::vector<std::string> tokens;
+ try
+ {
+ // Coverity fix: boost::split() may throw an exception of type boost::bad_function_call.
+ boost::split(tokens, line, boost::algorithm::is_any_of("\t ,;:"), boost::token_compress_on);
+ }
+ catch (const std::exception& e)
+ {
+ BOOST_LOG_TRIVIAL(error) << "An error occurred when splitting tokens: " << e.what();
+ continue;
+ }
+ for (const std::string& token : tokens)
+ {
+ if (!token.empty())
+ {
+ try
+ {
+ result.push_back(parseElementFunc(token));
+ }
+ catch (const std::exception&)
+ {
+ BOOST_LOG_TRIVIAL(error) << "'" << token << "' is not a valid number. It has been ignored.";
+ }
+ }
+ }
+ }
+
+ return result;
+ }
+
+ template<typename T>
+ std::vector<T> ParseArray(std::istream& stream);
+ template<>
+ std::vector<unsigned int> ParseArray(std::istream& stream)
+ {
+ return ParseArrayImpl<unsigned int>(stream,
+ [](const std::string& s) { return boost::numeric_cast<unsigned int>(std::stoi(s)); });
+ }
+ void RemoveDuplicateDevices(std::vector<armnn::BackendId>& computeDevices)
+ {
+ // Mark the duplicate devices as 'Undefined'.
+ for (auto i = computeDevices.begin(); i != computeDevices.end(); ++i)
+ {
+ for (auto j = std::next(i); j != computeDevices.end(); ++j)
+ {
+ if (*j == *i)
+ {
+ *j = armnn::Compute::Undefined;
+ }
+ }
+ }
+
+ // Remove 'Undefined' devices.
+ computeDevices.erase(std::remove(computeDevices.begin(), computeDevices.end(), armnn::Compute::Undefined),
+ computeDevices.end());
+ }
+} // namespace
+
+template<typename TParser, typename TDataType>
+int MainImpl(const char* modelPath,
+ bool isModelBinary,
+ const std::vector<armnn::BackendId>& computeDevices,
+ const char* inputName,
+ const armnn::TensorShape* inputTensorShape,
+ const char* inputTensorDataFilePath,
+ const char* outputName,
+ bool enableProfiling,
+ const size_t number_frame,
+ const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
+{
+ // Loads input tensor.
+ std::vector<uint8_t> input;
+ std::vector<float> input_resized;
+ using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
+
+ try
+ {
+ // Creates an InferenceModel, which will parse the model and load it into an IRuntime.
+ typename InferenceModel<TParser, TDataType>::Params params;
+ //const armnn::TensorShape inputTensorShape({ 1, 224, 224 3});
+
+ params.m_ModelPath = modelPath;
+ params.m_IsModelBinary = isModelBinary;
+ params.m_ComputeDevices = computeDevices;
+ params.m_InputBindings = { inputName };
+ params.m_InputShapes = { *inputTensorShape };
+ params.m_OutputBindings = { outputName };
+ //params.m_EnableProfiling = enableProfiling;
+ params.m_SubgraphId = 0;
+ InferenceModel<TParser, TDataType> model(params, enableProfiling, runtime);
+
+ VideoCapture cap;
+ int input_type = INPUT_IMAGE;
+ std::string filename = inputTensorDataFilePath;
+
+ size_t i = filename.rfind("camera_live_input", filename.length());
+ if (i != string::npos)
+ {
+ cap = VideoCapture(1);
+ namedWindow("ARMNN MobileNet Example", WINDOW_AUTOSIZE | CV_GUI_NORMAL);
+ input_type = INPUT_CAMERA; //camera input
+ }
+ else if((filename.substr(filename.find_last_of(".") + 1) == "mp4") ||
+ (filename.substr(filename.find_last_of(".") + 1) == "mov") ||
+ (filename.substr(filename.find_last_of(".") + 1) == "avi") )
+ {
+ cap = VideoCapture(inputTensorDataFilePath);
+ if (! cap.isOpened())
+ {
+ std::cout << "Cannot open video input: " << inputTensorDataFilePath << std::endl;
+ return (-1);
+ }
+
+ namedWindow("ARMNN MobileNet Example", WINDOW_AUTOSIZE | CV_GUI_NORMAL);
+ input_type = INPUT_VIDEO; //video clip input
+ }
+ if (input_type != INPUT_IMAGE)
+ {
+ //set the callback function for any mouse event. Used for right click mouse to exit the program.
+ setMouseCallback("ARMNN MobileNet Example", imagenetCallBackFunc, NULL);
+ }
+
+ for (unsigned int i=0; i < number_frame; i++)
+ {
+ if (input_type != INPUT_IMAGE)
+ {
+ cap.grab();
+ cap.retrieve(test_image);
+ }
+ std::unique_ptr<ImageData> inputData = loadImageData(inputTensorDataFilePath, cap, test_image, input_type);
+ input.resize(inputData->m_size);
+
+ input = std::move(inputData->m_image);
+ input_resized = ResizeBilinear(input, inputData->m_width, inputData->m_height, inputData->m_chnum, 224, 224);
+
+ // Set up input data container
+ std::vector<TContainer> inputDataContainer(1, std::move(input_resized));
+
+ // Set up output data container
+ std::vector<TContainer> outputDataContainers;
+ outputDataContainers.push_back(std::vector<float>(model.GetOutputSize()));
+
+ //profile start
+ predictStart = high_resolution_clock::now();
+ // Execute model
+ model.Run(inputDataContainer, outputDataContainers);
+ //profile end
+ predictEnd = high_resolution_clock::now();
+
+ double timeTakenS = duration<double>(predictEnd - predictStart).count();
+ double preformance_ret = static_cast<double>(1.0/timeTakenS);
+
+ //retrieve output
+ std::vector<float>& outputData = (mapbox::util::get<std::vector<float>>(outputDataContainers[0]));
+ //output TOP predictions
+ std::string predict_target_name;
+ // find the out with the highest confidence
+ int label = static_cast<int>(std::distance(outputData.begin(), std::max_element(outputData.begin(), outputData.end())));
+ std::fstream file("/usr/share/arm/armnn/models/labels.txt");
+ //std::string predict_target_name;
+ for (int i=0; i <= label; i++)
+ {
+ std::getline(file, predict_target_name);
+ }
+ //get the probability of the top prediction
+ float prob = 100*outputData.data()[label];
+ //clean the top one so as to find the second top prediction
+ outputData.data()[label] = 0;
+ std::cout << "Top(1) prediction is " << predict_target_name << " with confidence: " << prob << "%" << std::endl;
+ //output next TOP 4 predictions
+ for (int ii=1; ii<5; ii++)
+ {
+ std::string predict_target_name_n;
+ // find the out with the highest confidence
+ int label = static_cast<int>(std::distance(outputData.begin(), std::max_element(outputData.begin(), outputData.end())));
+ std::fstream file("/usr/share/arm/armnn/models/labels.txt");
+ //std::string predict_target_name;
+ for (int i=0; i <= label; i++)
+ {
+ std::getline(file, predict_target_name_n);
+ }
+ //get the probability of the prediction
+ float prob = 100*outputData.data()[label];
+ //clean the top one so as to find the second top prediction
+ outputData.data()[label] = 0;
+
+ std::cout << "Top(" << (ii+1) << ") prediction is " << predict_target_name_n << " with confidence: " << prob << "%" << std::endl;
+ }
+ std::cout << "Performance (FPS): " << preformance_ret << std::endl;
+
+ if (input_type != INPUT_IMAGE)
+ {
+ //convert image format back to BGR for OpenCV imshow from RGB format required by armnn.
+ cv::cvtColor(test_image, test_image, CV_RGB2BGR);
+ // output identified object name on top of input image
+ cv::putText(test_image, predict_target_name,
+ cv::Point(rectCrop.x + 5,rectCrop.y + 20), // Coordinates
+ cv::FONT_HERSHEY_COMPLEX_SMALL, // Font
+ 1.0, // Scale. 2.0 = 2x bigger
+ cv::Scalar(0,0,255), // Color
+ 1, // Thickness
+ 8); // Line type
+
+ // output preformance in FPS on top of input image
+ std::string preformance_ret_string = "Performance (FPS): " + boost::lexical_cast<std::string>(preformance_ret);
+ cv::putText(test_image, preformance_ret_string,
+ cv::Point(rectCrop.x + 5,rectCrop.y + 40), // Coordinates
+ cv::FONT_HERSHEY_COMPLEX_SMALL, // Font
+ 1.0, // Scale. 2.0 = 2x bigger
+ cv::Scalar(0,0,255), // Color
+ 1, // Thickness
+ 8); // Line type
+
+ cv::imshow("ARMNN MobileNet Example", test_image);
+ waitKey(2);
+ }
+ }
+ }
+ catch (armnn::Exception const& e)
+ {
+ BOOST_LOG_TRIVIAL(fatal) << "Armnn Error: " << e.what();
+ return EXIT_FAILURE;
+ }
+ return EXIT_SUCCESS;
+}
+
+// This will run a test
+int RunTest(const std::string& modelFormat,
+ const std::string& inputTensorShapeStr,
+ const vector<armnn::BackendId>& computeDevice,
+ const std::string& modelPath,
+ const std::string& inputName,
+ const std::string& inputTensorDataFilePath,
+ const std::string& outputName,
+ bool enableProfiling,
+ const size_t subgraphId,
+ const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
+{
+ // Parse model binary flag from the model-format string we got from the command-line
+ bool isModelBinary;
+ if (modelFormat.find("bin") != std::string::npos)
+ {
+ isModelBinary = true;
+ }
+ else if (modelFormat.find("txt") != std::string::npos || modelFormat.find("text") != std::string::npos)
+ {
+ isModelBinary = false;
+ }
+ else
+ {
+ BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat << "'. Please include 'binary' or 'text'";
+ return EXIT_FAILURE;
+ }
+
+ // Parse input tensor shape from the string we got from the command-line.
+ std::unique_ptr<armnn::TensorShape> inputTensorShape;
+ if (!inputTensorShapeStr.empty())
+ {
+ std::stringstream ss(inputTensorShapeStr);
+ std::vector<unsigned int> dims = ParseArray<unsigned int>(ss);
+ try
+ {
+ // Coverity fix: An exception of type armnn::InvalidArgumentException is thrown and never caught.
+ inputTensorShape = std::make_unique<armnn::TensorShape>(dims.size(), dims.data());
+ }
+ catch (const armnn::InvalidArgumentException& e)
+ {
+ BOOST_LOG_TRIVIAL(fatal) << "Cannot create tensor shape: " << e.what();
+ return EXIT_FAILURE;
+ }
+ }
+ // Forward to implementation based on the parser type
+ if (modelFormat.find("caffe") != std::string::npos)
+ {
+#if defined(ARMNN_CAFFE_PARSER)
+ return MainImpl<armnnCaffeParser::ICaffeParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
+ inputName.c_str(), inputTensorShape.get(),
+ inputTensorDataFilePath.c_str(), outputName.c_str(),
+ enableProfiling, subgraphId, runtime);
+#else
+ BOOST_LOG_TRIVIAL(fatal) << "Not built with Caffe parser support.";
+ return EXIT_FAILURE;
+#endif
+ }
+ else if (modelFormat.find("onnx") != std::string::npos)
+ {
+#if defined(ARMNN_ONNX_PARSER)
+ return MainImpl<armnnOnnxParser::IOnnxParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
+ inputName.c_str(), inputTensorShape.get(),
+ inputTensorDataFilePath.c_str(), outputName.c_str(),
+ enableProfiling, subgraphId, runtime);
+#else
+ BOOST_LOG_TRIVIAL(fatal) << "Not built with Onnx parser support.";
+ return EXIT_FAILURE;
+#endif
+ }
+ else if (modelFormat.find("tensorflow") != std::string::npos)
+ {
+#if defined(ARMNN_TF_PARSER)
+ return MainImpl<armnnTfParser::ITfParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
+ inputName.c_str(), inputTensorShape.get(),
+ inputTensorDataFilePath.c_str(), outputName.c_str(),
+ enableProfiling, subgraphId, runtime);
+#else
+ BOOST_LOG_TRIVIAL(fatal) << "Not built with Tensorflow parser support.";
+ return EXIT_FAILURE;
+#endif
+ }
+ else if(modelFormat.find("tflite") != std::string::npos)
+ {
+#if defined(ARMNN_TF_LITE_PARSER)
+ if (! isModelBinary)
+ {
+ BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat << "'. Only 'binary' format supported \
+ for tflite files";
+ return EXIT_FAILURE;
+ }
+ return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
+ inputName.c_str(), inputTensorShape.get(),
+ inputTensorDataFilePath.c_str(), outputName.c_str(),
+ enableProfiling, subgraphId, runtime);
+#else
+ BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat <<
+ "'. Please include 'caffe', 'tensorflow', 'tflite' or 'onnx'";
+ return EXIT_FAILURE;
+#endif
+ }
+ else
+ {
+ BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat <<
+ "'. Please include 'caffe', 'tensorflow', 'tflite' or 'onnx'";
+ return EXIT_FAILURE;
+ }
+}
+
+int main(int argc, const char* argv[])
+{
+ // Configures logging for both the ARMNN library and this test program.
+#ifdef NDEBUG
+ armnn::LogSeverity level = armnn::LogSeverity::Info;
+#else
+ armnn::LogSeverity level = armnn::LogSeverity::Debug;
+#endif
+ armnn::ConfigureLogging(true, true, level);
+
+ std::string testCasesFile;
+
+ std::string modelFormat = "tensorflow-binary";
+ std::string modelPath = "/usr/share/arm/armnn/models/mobilenet_v1_1.0_224_frozen.pb";
+ std::string inputName = "input";
+ std::string inputTensorShapeStr = "1 224 224 3";
+ std::string inputTensorDataFilePath = "/usr/share/arm/armnn/testvecs/test2.mp4";
+ std::string outputName = "MobilenetV1/Predictions/Reshape_1";
+ std::vector<armnn::BackendId> computeDevices = {armnn::Compute::CpuAcc};
+ // Catch ctrl-c to ensure a clean exit
+ signal(SIGABRT, exit);
+ signal(SIGTERM, exit);
+
+ if (argc == 1)
+ {
+ return RunTest(modelFormat, inputTensorShapeStr, computeDevices,
+ modelPath, inputName, inputTensorDataFilePath, outputName, false, 1000);
+ }
+ else
+ {
+ size_t subgraphId = 0;
+ po::options_description desc("Options");
+ try
+ {
+ desc.add_options()
+ ("help", "Display usage information")
+ ("test-cases,t", po::value(&testCasesFile), "Path to a CSV file containing test cases to run. "
+ "If set, further parameters -- with the exception of compute device and concurrency -- will be ignored, "
+ "as they are expected to be defined in the file for each test in particular.")
+ ("concurrent,n", po::bool_switch()->default_value(false),
+ "Whether or not the test cases should be executed in parallel")
+ ("model-format,f", po::value(&modelFormat),
+ "caffe-binary, caffe-text, onnx-binary, onnx-text, tflite-binary, tensorflow-binary or tensorflow-text.")
+ ("model-path,m", po::value(&modelPath), "Path to model file, e.g. .caffemodel, .prototxt,"
+ " .tflite, .onnx")
+ ("compute,c", po::value<std::vector<armnn::BackendId>>()->multitoken(),
+ "The preferred order of devices to run layers on by default. Possible choices: CpuAcc, CpuRef, GpuAcc")
+ ("input-name,i", po::value(&inputName), "Identifier of the input tensor in the network.")
+ ("input-tensor-shape,s", po::value(&inputTensorShapeStr),
+ "The shape of the input tensor in the network as a flat array of integers separated by whitespace. "
+ "This parameter is optional, depending on the network.")
+ ("input-tensor-data,d", po::value(&inputTensorDataFilePath),
+ "Input test file name. It can be image/video clip file name or use 'camera_live_input' to select camera input.")
+ ("output-name,o", po::value(&outputName), "Identifier of the output tensor in the network.")
+ ("event-based-profiling,e", po::bool_switch()->default_value(false),
+ "Enables built in profiler. If unset, defaults to off.")
+ ("number_frame", po::value<size_t>(&subgraphId)->default_value(1), "Number of frames to process.");
+ }
+ catch (const std::exception& e)
+ {
+ // Coverity points out that default_value(...) can throw a bad_lexical_cast,
+ // and that desc.add_options() can throw boost::io::too_few_args.
+ // They really won't in any of these cases.
+ BOOST_ASSERT_MSG(false, "Caught unexpected exception");
+ BOOST_LOG_TRIVIAL(fatal) << "Fatal internal error: " << e.what();
+ return EXIT_FAILURE;
+ }
+
+ // Parses the command-line.
+ po::variables_map vm;
+ try
+ {
+ po::store(po::parse_command_line(argc, argv, desc), vm);
+ po::notify(vm);
+ }
+ catch (const po::error& e)
+ {
+ std::cerr << e.what() << std::endl << std::endl;
+ std::cerr << desc << std::endl;
+ return EXIT_FAILURE;
+ }
+
+ // Run single test
+ // Get the preferred order of compute devices.
+ std::vector<armnn::BackendId> computeDevices = vm["compute"].as<std::vector<armnn::BackendId>>();
+ bool enableProfiling = vm["event-based-profiling"].as<bool>();
+
+ // Remove duplicates from the list of compute devices.
+ RemoveDuplicateDevices(computeDevices);
+
+ return RunTest(modelFormat, inputTensorShapeStr, computeDevices,
+ modelPath, inputName, inputTensorDataFilePath, outputName, enableProfiling, subgraphId);
+ }
+}
+
--
1.9.1
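
The example above prints its top-five predictions by repeatedly locating the maximum score and zeroing it out. As an illustration only (not code from the patch), an equivalent top-k readout over a flat score vector can be sketched with std::partial_sort on indices:

#include <algorithm>
#include <cstddef>
#include <numeric>
#include <vector>

// Returns the indices of the k highest scores, highest first.
std::vector<std::size_t> TopK(const std::vector<float>& scores, std::size_t k)
{
    std::vector<std::size_t> indices(scores.size());
    std::iota(indices.begin(), indices.end(), 0);  // 0, 1, 2, ...
    k = std::min(k, indices.size());
    std::partial_sort(indices.begin(),
                      indices.begin() + static_cast<std::ptrdiff_t>(k),
                      indices.end(),
                      [&scores](std::size_t a, std::size_t b) { return scores[a] > scores[b]; });
    indices.resize(k);
    return indices;
}

Each returned index can then be mapped to its label line, as the example does with labels.txt.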


@@ -1,60 +0,0 @@
From ee152f3b68f91c5fff336306d011becdcf3a6b17 Mon Sep 17 00:00:00 2001
From: Djordje Senicic <x0157990@ti.com>
Date: Sat, 24 Aug 2019 17:58:38 -0400
Subject: [PATCH] command line options for video port selection
- Add command line selection <0|1|2|3> of video port used for live camera input
Upstream-Status: Inappropriate [TI only test code]
Signed-off-by: Djordje Senicic <x0157990@ti.com>
---
tests/ArmnnExamples/ArmnnExamples.cpp | 23 ++++++++++++++++++++---
1 file changed, 20 insertions(+), 3 deletions(-)
diff --git a/tests/ArmnnExamples/ArmnnExamples.cpp b/tests/ArmnnExamples/ArmnnExamples.cpp
index 638fc145..d1526539 100644
--- a/tests/ArmnnExamples/ArmnnExamples.cpp
+++ b/tests/ArmnnExamples/ArmnnExamples.cpp
@@ -316,10 +316,27 @@ int MainImpl(const char* modelPath,
int input_type = INPUT_IMAGE;
std::string filename = inputTensorDataFilePath;
- size_t i = filename.rfind("camera_live_input", filename.length());
+ size_t i = filename.rfind("camera_live_input", filename.length());
if (i != string::npos)
{
- cap = VideoCapture(1);
+ int vport = 1;
+ size_t loc_i = filename.rfind("camera_live_input0", filename.length());
+ if(loc_i != string::npos) vport = 0;
+ else {
+ loc_i = filename.rfind("camera_live_input1", filename.length());
+ if(loc_i != string::npos) vport = 1;
+ else {
+ loc_i = filename.rfind("camera_live_input2", filename.length());
+ if(loc_i != string::npos) vport = 2;
+ else {
+ loc_i = filename.rfind("camera_live_input3", filename.length());
+ if(loc_i != string::npos) vport = 3;
+ else std::cout << "Setting ports beyond 3 not supported - using default!" << std::endl;
+ }
+ }
+ }
+ std::cout << "Using video" << vport << std::endl;
+ cap = VideoCapture(vport);
namedWindow("ARMNN MobileNet Example", WINDOW_AUTOSIZE | CV_GUI_NORMAL);
input_type = INPUT_CAMERA; //camera input
}
@@ -609,7 +626,7 @@ int main(int argc, const char* argv[])
"The shape of the input tensor in the network as a flat array of integers separated by whitespace. "
"This parameter is optional, depending on the network.")
("input-tensor-data,d", po::value(&inputTensorDataFilePath),
- "Input test file name. It can be image/video clip file name or use 'camera_live_input' to select camera input.")
+ "Input test file name. It can be image/video clip file name or 'camera_live_input or camera_live_input<0|1|2|3>' to select camera input.")
("output-name,o", po::value(&outputName), "Identifier of the output tensor in the network.")
("event-based-profiling,e", po::bool_switch()->default_value(false),
"Enables built in profiler. If unset, defaults to off.")
--
2.17.1
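
The hunk above picks the V4L2 device index by chaining rfind() lookups for each supported suffix. A rough equivalent that reads the trailing digit directly, keeping the patch's default port of 1 and its 0-3 limit (hypothetical helper, not part of the patch):

#include <cctype>
#include <iostream>
#include <string>

int ParseCameraPort(const std::string& name)
{
    int vport = 1;  // default when no usable suffix is present
    if (!name.empty() && std::isdigit(static_cast<unsigned char>(name.back())))
    {
        const int digit = name.back() - '0';
        if (digit <= 3)
        {
            vport = digit;
        }
        else
        {
            std::cout << "Setting ports beyond 3 not supported - using default!" << std::endl;
        }
    }
    return vport;
}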


@@ -1,28 +0,0 @@
From a3e266a2de7c45116428f4e21645a2657534191b Mon Sep 17 00:00:00 2001
From: Djordje Senicic <x0157990@ti.com>
Date: Mon, 26 Aug 2019 03:51:39 -0400
Subject: [PATCH] armnnexamples: update for 19.08 modifications
Upstream-Status: Inappropriate [TI only test code]
Signed-off-by: Djordje Senicic <x0157990@ti.com>
---
tests/ArmnnExamples/ArmnnExamples.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/ArmnnExamples/ArmnnExamples.cpp b/tests/ArmnnExamples/ArmnnExamples.cpp
index d1526539..c10a4fc0 100644
--- a/tests/ArmnnExamples/ArmnnExamples.cpp
+++ b/tests/ArmnnExamples/ArmnnExamples.cpp
@@ -310,7 +310,7 @@ int MainImpl(const char* modelPath,
params.m_OutputBindings = { outputName };
//params.m_EnableProfiling = enableProfiling;
params.m_SubgraphId = 0;
- InferenceModel<TParser, TDataType> model(params, enableProfiling, runtime);
+ InferenceModel<TParser, TDataType> model(params, enableProfiling, "", runtime);
VideoCapture cap;
int input_type = INPUT_IMAGE;
--
2.17.1


@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b06c0b03d1447fb1c9222a8dc1f7fc0aac8dbd9defdf67087de2d305b1dbd323
size 28629675


@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6af3453b6a0238f9734bbeb13e006f07f7a7a459a978a21423555819415fa328
size 28695424


@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ca85052373c19d6816e9842b732b5b3433fefddc302621adac897b1f5b64487a
size 29099331


@@ -1,18 +0,0 @@
--- armnn-24.08/include/armnn/Numpy.hpp.orig 2024-09-06 11:05:16.800066800 +0200
+++ armnn-24.08/include/armnn/Numpy.hpp 2024-09-06 11:05:58.717592900 +0200
@@ -157,7 +157,7 @@ namespace armnnNumpy
inline void CreateHeader(std::ifstream& ifStream, HeaderInfo& headerInfo, Header& header)
{
char stringBuffer[headerInfo.m_HeaderLen];
- ifStream.read(stringBuffer, headerInfo.m_HeaderLen);
+ ifStream.read(stringBuffer, static_cast<std::streamsize>(headerInfo.m_HeaderLen));
header.m_HeaderString = std::string(stringBuffer, headerInfo.m_HeaderLen);
// Remove new line character at the end of the string
@@ -403,4 +403,4 @@ namespace armnnNumpy
}
}
-#endif // NUMPY_HPP
\ No newline at end of file
+#endif // NUMPY_HPP
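
The one-line change above adds an explicit cast where std::istream::read() is called with the numpy header length. A minimal sketch of the pattern (using std::string here instead of the variable-length array in the original), assuming m_HeaderLen is an unsigned 32-bit field and the build treats conversion warnings as errors — both assumptions, not stated in the patch:

#include <cstdint>
#include <fstream>
#include <string>

struct HeaderInfo
{
    std::uint32_t m_HeaderLen;  // hypothetical stand-in for the armnnNumpy header length
};

std::string ReadHeaderString(std::ifstream& ifStream, const HeaderInfo& headerInfo)
{
    std::string buffer(headerInfo.m_HeaderLen, '\0');
    // std::istream::read() takes a signed std::streamsize; on 32-bit targets such as
    // armv7 that type is also 32 bits wide, so passing an unsigned length changes
    // signedness. The explicit cast states the intent and silences the warning.
    ifStream.read(&buffer[0], static_cast<std::streamsize>(headerInfo.m_HeaderLen));
    return buffer;
}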


@@ -1,19 +0,0 @@
--- armnn-19.08.orig/tests/CMakeLists.txt 2019-10-17 09:11:02.836949176 +0200
+++ armnn-19.08/tests/CMakeLists.txt 2019-10-17 09:10:50.384869262 +0200
@@ -1,6 +1,3 @@
-find_package( OpenCV REQUIRED )
-include_directories( ${OpenCV_INCLUDE_DIRS} )
-
# UnitTests
include(CheckIncludeFiles)
@@ -368,6 +365,9 @@ if(BUILD_ARMNN_QUANTIZER)
endif()
if (BUILD_ARMNN_EXAMPLES)
+ find_package( OpenCV REQUIRED )
+ include_directories( ${OpenCV_INCLUDE_DIRS} )
+
set(ArmnnExamples_sources
ArmnnExamples/ArmnnExamples.cpp)