diff --git a/0001-common-gpu-include-thread-and-limit-headers-to-fix-G.patch b/0001-common-gpu-include-thread-and-limit-headers-to-fix-G.patch new file mode 100644 index 0000000..0a4d6df --- /dev/null +++ b/0001-common-gpu-include-thread-and-limit-headers-to-fix-G.patch @@ -0,0 +1,38 @@ +From cfbefd8d744d4cdcdf3dd2f18576f487b36911b6 Mon Sep 17 00:00:00 2001 +From: Denis Samoilov +Date: Fri, 2 Apr 2021 19:46:22 -0700 +Subject: [PATCH] common, gpu: include thread and limit headers to fix GCC 11 + build issues + +--- + src/common/primitive_cache.hpp | 1 + + src/gpu/jit/ngen/ngen_auto_swsb.hpp | 1 + + 2 files changed, 2 insertions(+) + +diff --git a/src/common/primitive_cache.hpp b/src/common/primitive_cache.hpp +index 73cb1224f..05a3e53e5 100644 +--- a/src/common/primitive_cache.hpp ++++ b/src/common/primitive_cache.hpp +@@ -20,6 +20,7 @@ + #include + #include + #include ++#include + #include + + #include "c_types_map.hpp" +diff --git a/src/gpu/jit/ngen/ngen_auto_swsb.hpp b/src/gpu/jit/ngen/ngen_auto_swsb.hpp +index de3417af3..62ef2a571 100644 +--- a/src/gpu/jit/ngen/ngen_auto_swsb.hpp ++++ b/src/gpu/jit/ngen/ngen_auto_swsb.hpp +@@ -33,6 +33,7 @@ + + #include + #include ++#include + + namespace ngen { + namespace autoswsb { +-- +2.26.2 + diff --git a/oneDNN-2.2.3.tar.gz b/oneDNN-2.2.3.tar.gz new file mode 100644 index 0000000..cc02d3a --- /dev/null +++ b/oneDNN-2.2.3.tar.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07e5cc2a30e7bb5a381eba04f8579f427372132ed3d44363f5fd89850a7b50fd +size 9534138 diff --git a/onednn-1045.patch b/onednn-1045.patch new file mode 100644 index 0000000..2c3ae20 --- /dev/null +++ b/onednn-1045.patch @@ -0,0 +1,21 @@ +From a94acd4e2dfaf51552dd2a60b059df1c1f14e452 Mon Sep 17 00:00:00 2001 +From: Alexandre Truong +Date: Wed, 28 Apr 2021 10:32:35 +0100 +Subject: [PATCH] cpu: aarch64: missing include for arm_compute::Scheduler + +--- + src/cpu/aarch64/acl_indirect_gemm_convolution.hpp | 1 + + 1 file changed, 1 
insertion(+) + +diff --git a/src/cpu/aarch64/acl_indirect_gemm_convolution.hpp b/src/cpu/aarch64/acl_indirect_gemm_convolution.hpp +index 86d2bed73..040311f8c 100644 +--- a/src/cpu/aarch64/acl_indirect_gemm_convolution.hpp ++++ b/src/cpu/aarch64/acl_indirect_gemm_convolution.hpp +@@ -26,6 +26,7 @@ + + #include "arm_compute/runtime/FunctionDescriptors.h" + #include "arm_compute/runtime/NEON/NEFunctions.h" ++#include "arm_compute/runtime/Scheduler.h" + + namespace dnnl { + namespace impl { diff --git a/onednn-2.2.1.tar.gz b/onednn-2.2.1.tar.gz deleted file mode 100644 index 36f9a0b..0000000 --- a/onednn-2.2.1.tar.gz +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:3faf3b7811dd37835169a9d5c57f17fd591f062029851186f9b11db117b9d1d9 -size 9533694 diff --git a/onednn.changes b/onednn.changes index 3fe412d..d3f4c24 100644 --- a/onednn.changes +++ b/onednn.changes @@ -1,3 +1,50 @@ +------------------------------------------------------------------- +Thu Jun 3 01:38:56 UTC 2021 - Ferdinand Thiessen + +- Update to version 2.2.3 + * Fixed a bug in int8 depthwise convolution primitive with groups + and 1d spatial size for processors with AVX-512 and AVX2 support + * Fixed correctness issue for PReLU primitive + * Fixed correctness issue in reorder for blocked layouts with + zero padding + * Improved performance of weights reorders used by BRGEMM-based + convolution primitive for processors with AVX-512 support + * Added -fp-model=precise build flag for DPC++ code + * Fixed potential memory leak in matmul primitive + * Fixed performance of matmul primitive when fused with bias + update and sum + * Fixed a bug in matmul primitive when writing to non-contiguous + destination buffer +- Add upstream patch for GCC11 support + * 0001-common-gpu-include-thread-and-limit-headers-to-fix-G.patch + +------------------------------------------------------------------- +Thu May 27 08:10:13 UTC 2021 - Jan Engelhardt + +- Update descriptions. 
+ +------------------------------------------------------------------- +Wed May 26 13:29:27 UTC 2021 - Guillaume GARDET + +- Update to 2.2.2, changes: + * Fixed performance regression in fp32 forward inner product for + shapes with number of output channels equal to 1 for processors + with Intel AVX-512 support (714b1fd) + * Fixed performance regression in forward convolutions with groups + for processors with Intel AVX-512 support (3555d4a) + * Removed -std=c++11 build flag for DPC++ headers (1fcb867) + * Fixed buffer access in initializing workspace in RNN + implementation on GPU (9b03091) + * Fixed a bug in convolution with 1x1 kernel and mixed + strides on processors with Intel AVX-512 support (d0b3e3f) + * Used getauxval for Linux to get CPU features on AArch64 + systems (25c4cea) + * Added -fp-model=precise build flag for DPC++ code (3e40e5e) + * Fixed out-of-bounds writes in elementwise primitive on + Intel Processor Graphics (bcf823c) +- Fix build with Arm Compute Library: + * onednn-1045.patch + ------------------------------------------------------------------- Tue Apr 13 07:53:16 UTC 2021 - Guillaume GARDET diff --git a/onednn.spec b/onednn.spec index 2a2e683..23b8a88 100644 --- a/onednn.spec +++ b/onednn.spec @@ -31,12 +31,16 @@ %define libname libdnnl2 Name: onednn -Version: 2.2.1 +Version: 2.2.3 Release: 0 -Summary: Intel(R) Math Kernel Library for Deep Neural Networks +Summary: Intel Math Kernel Library for Deep Neural Networks License: Apache-2.0 URL: https://01.org/onednn -Source0: https://github.com/oneapi-src/oneDNN/archive/v%{version}/%{name}-%{version}.tar.gz +Source0: https://github.com/oneapi-src/oneDNN/archive/v%{version}/oneDNN-%{version}.tar.gz +# PATCH-FIX-UPSTREAM onednn-1045.patch -- https://github.com/oneapi-src/oneDNN/pull/1045 +Patch0: onednn-1045.patch +# PATCH-FIX-UPSTREAM 0001-common-gpu-include-thread-and-limit-headers-to-fix-G.patch +Patch1: 0001-common-gpu-include-thread-and-limit-headers-to-fix-G.patch BuildRequires: 
cmake BuildRequires: doxygen BuildRequires: fdupes @@ -57,18 +61,18 @@ Obsoletes: mkl-dnn <= %{version} Provides: oneDNN = %{version} %description -Intel(R) Math Kernel Library for Deep Neural Networks (Intel(R) MKL-DNN) is an +Intel Math Kernel Library for Deep Neural Networks (Intel MKL-DNN) is an open-source performance library for deep-learning applications. The library accelerates deep-learning applications and frameworks on Intel architecture. Intel MKL-DNN contains vectorized and threaded building blocks that you can use to implement deep neural networks (DNN) with C and C++ interfaces. %package -n benchdnn -Summary: Header files of Intel(R) Math Kernel Library +Summary: Header files of Intel Math Kernel Library Requires: %{libname} = %{version} %description -n benchdnn -Intel(R) Math Kernel Library for Deep Neural Networks (Intel(R) MKL-DNN) is an +Intel Math Kernel Library for Deep Neural Networks (Intel MKL-DNN) is an open-source performance library for deep-learning applications. The library accelerates deep-learning applications and frameworks on Intel architecture. Intel MKL-DNN contains vectorized and threaded building blocks that you can use @@ -77,43 +81,42 @@ to implement deep neural networks (DNN) with C and C++ interfaces. This package only includes the benchmark utility including its input files. %package devel -Summary: Header files of Intel(R) Math Kernel Library +Summary: Header files of Intel Math Kernel Library Requires: %{libname} = %{version} Provides: mkl-dnn-devel = %{version} Obsoletes: mkl-dnn-devel <= %{version} Provides: oneDNN-devel = %{version} %description devel -Intel(R) Math Kernel Library for Deep Neural Networks (Intel(R) MKL-DNN) is an +Intel Math Kernel Library for Deep Neural Networks (Intel MKL-DNN) is an open-source performance library for deep-learning applications. The library accelerates deep-learning applications and frameworks on Intel architecture. 
Intel MKL-DNN contains vectorized and threaded building blocks that you can use to implement deep neural networks (DNN) with C and C++ interfaces. This package includes the required headers and library files to develop software -with the Intel(R) MKL-DNN. +with the Intel MKL-DNN. %package doc -Summary: Reference documentation for the Intel(R) Math Kernel Library +Summary: Reference documentation for the Intel Math Kernel Library BuildArch: noarch %description doc -The reference documentation for the Intel(R) Math Kernel Library can be installed +The reference documentation for the Intel Math Kernel Library can be installed with this package. %package -n %{libname} -Summary: Header files of Intel(R) Math Kernel Library +Summary: Header files of Intel Math Kernel Library %description -n %{libname} -Intel(R) Math Kernel Library for Deep Neural Networks (Intel(R) MKL-DNN) is an +Intel Math Kernel Library for Deep Neural Networks (Intel MKL-DNN) is an open-source performance library for deep-learning applications. The library accelerates deep-learning applications and frameworks on Intel architecture. Intel MKL-DNN contains vectorized and threaded building blocks that you can use to implement deep neural networks (DNN) with C and C++ interfaces. %prep -%setup -q -n oneDNN-%{version} -%autopatch -p1 +%autosetup -p1 -n oneDNN-%{version} %build %cmake \ @@ -167,6 +170,7 @@ popd %{_datadir}/benchdnn %files devel +%doc README.md %{_includedir}/mkl-dnn %{_includedir}/mkldnn*.h* %{_includedir}/dnnl*.h* @@ -185,7 +189,6 @@ popd %files -n %{libname} %license LICENSE -%doc README.md %{_libdir}/libdnnl.so.* %{_libdir}/libmkldnn.so.*