From 23a3a6d23d6f86eaa4df1c8d0c2538cc0b5b5b4f97696df6d842412108e79a2e Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Sun, 10 Nov 2024 10:26:58 +0000 Subject: [PATCH 1/2] OBS-URL: https://build.opensuse.org/package/show/devel:languages:python:numeric/python-numba?expand=0&rev=97 --- .gitattributes | 23 + .gitignore | 1 + _multibuild | 5 + numba-0.59.1.tar.gz | 3 + numba-0.60.0.tar.gz | 3 + numpy21.patch | 389 ++++++++++ python-numba.changes | 1534 ++++++++++++++++++++++++++++++++++++++ python-numba.spec | 191 +++++ skip-failing-tests.patch | 71 ++ 9 files changed, 2220 insertions(+) create mode 100644 .gitattributes create mode 100644 .gitignore create mode 100644 _multibuild create mode 100644 numba-0.59.1.tar.gz create mode 100644 numba-0.60.0.tar.gz create mode 100644 numpy21.patch create mode 100644 python-numba.changes create mode 100644 python-numba.spec create mode 100644 skip-failing-tests.patch diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..9b03811 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,23 @@ +## Default LFS +*.7z filter=lfs diff=lfs merge=lfs -text +*.bsp filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.gem filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.jar filter=lfs diff=lfs merge=lfs -text +*.lz filter=lfs diff=lfs merge=lfs -text +*.lzma filter=lfs diff=lfs merge=lfs -text +*.obscpio filter=lfs diff=lfs merge=lfs -text +*.oxt filter=lfs diff=lfs merge=lfs -text +*.pdf filter=lfs diff=lfs merge=lfs -text +*.png filter=lfs diff=lfs merge=lfs -text +*.rpm filter=lfs diff=lfs merge=lfs -text +*.tbz filter=lfs diff=lfs merge=lfs -text +*.tbz2 filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.ttf filter=lfs diff=lfs merge=lfs -text +*.txz filter=lfs diff=lfs merge=lfs -text +*.whl filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..57affb6 --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +.osc diff --git a/_multibuild b/_multibuild new file mode 100644 index 0000000..6d8cafe --- /dev/null +++ b/_multibuild @@ -0,0 +1,5 @@ + + test-py310 + test-py311 + test-py312 + diff --git a/numba-0.59.1.tar.gz b/numba-0.59.1.tar.gz new file mode 100644 index 0000000..e45b0d2 --- /dev/null +++ b/numba-0.59.1.tar.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:76f69132b96028d2774ed20415e8c528a34e3299a40581bae178f0994a2f370b +size 2652730 diff --git a/numba-0.60.0.tar.gz b/numba-0.60.0.tar.gz new file mode 100644 index 0000000..26d56b2 --- /dev/null +++ b/numba-0.60.0.tar.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5df6158e5584eece5fc83294b949fd30b9f1125df7708862205217e068aabf16 +size 2702171 diff --git a/numpy21.patch b/numpy21.patch new file mode 100644 index 0000000..398a1ee --- /dev/null +++ b/numpy21.patch @@ -0,0 +1,389 @@ +From 5f2a3d60fc9c1e25bec9fd6de0b0b8bae6f142da Mon Sep 17 00:00:00 2001 +From: kc611 +Date: Mon, 30 Sep 2024 23:08:18 +0530 +Subject: [PATCH 01/22] Added NumPy 2.1 Support + +--- + numba/cpython/randomimpl.py | 6 ++- + numba/cuda/tests/cudapy/test_debug.py | 2 +- + numba/np/arrayobj.py | 6 +-- + numba/np/math/numbers.py | 5 +++ + numba/np/npyfuncs.py | 58 +++++++++++++++++---------- + numba/np/old_arraymath.py | 2 + + numba/np/random/old_distributions.py | 4 +- + numba/np/ufunc_db.py | 36 
+++++++++++++++++ + numba/tests/test_array_methods.py | 19 ++++++--- + 9 files changed, 105 insertions(+), 33 deletions(-) + +Index: numba-0.60.0/numba/cpython/randomimpl.py +=================================================================== +--- numba-0.60.0.orig/numba/cpython/randomimpl.py ++++ numba-0.60.0/numba/cpython/randomimpl.py +@@ -17,7 +17,7 @@ from numba.core.imputils import (Registr + from numba.core.typing import signature + from numba.core import types, cgutils + from numba.core.errors import NumbaTypeError +- ++from numba.np.random._constants import LONG_MAX + + registry = Registry('randomimpl') + lower = registry.lower +@@ -1798,6 +1798,10 @@ def zipf_impl(a): + U = 1.0 - np.random.random() + V = np.random.random() + X = int(math.floor(U ** (-1.0 / am1))) ++ ++ if (X > LONG_MAX or X < 1.0): ++ continue ++ + T = (1.0 + 1.0 / X) ** am1 + if X >= 1 and V * X * (T - 1.0) / (b - 1.0) <= (T / b): + return X +Index: numba-0.60.0/numba/np/arrayobj.py +=================================================================== +--- numba-0.60.0.orig/numba/np/arrayobj.py ++++ numba-0.60.0/numba/np/arrayobj.py +@@ -1932,17 +1932,23 @@ def numpy_geomspace(start, stop, num=50) + raise ValueError('Geometric sequence cannot include zero') + start = result_dtype(start) + stop = result_dtype(stop) +- both_imaginary = (start.real == 0) & (stop.real == 0) +- both_negative = (np.sign(start) == -1) & (np.sign(stop) == -1) +- out_sign = 1 +- if both_imaginary: +- start = start.imag +- stop = stop.imag +- out_sign = 1j +- if both_negative: +- start = -start +- stop = -stop +- out_sign = -out_sign ++ if numpy_version < (2, 0): ++ both_imaginary = (start.real == 0) & (stop.real == 0) ++ both_negative = (np.sign(start) == -1) & (np.sign(stop) == -1) ++ out_sign = 1 ++ if both_imaginary: ++ start = start.imag ++ stop = stop.imag ++ out_sign = 1j ++ if both_negative: ++ start = -start ++ stop = -stop ++ out_sign = -out_sign ++ else: ++ out_sign = np.sign(start) ++ start /= out_sign ++ stop /= out_sign ++ + logstart = np.log10(start) + logstop = np.log10(stop) + result = np.logspace(logstart, logstop, num) +@@ -2144,11 +2150,18 @@ def array_reshape_vararg(context, builde + return array_reshape(context, builder, new_sig, new_args) + + +-@overload(np.reshape) +-def np_reshape(a, newshape): +- def np_reshape_impl(a, newshape): +- return a.reshape(newshape) +- return np_reshape_impl ++if numpy_version < (2, 1): ++ @overload(np.reshape) ++ def np_reshape(a, newshape): ++ def np_reshape_impl(a, newshape): ++ return a.reshape(newshape) ++ return np_reshape_impl ++else: ++ @overload(np.reshape) ++ def np_reshape(a, shape): ++ def np_reshape_impl(a, shape): ++ return a.reshape(shape) ++ return np_reshape_impl + + + @overload(np.resize) +Index: numba-0.60.0/numba/np/math/numbers.py +=================================================================== +--- numba-0.60.0.orig/numba/np/math/numbers.py ++++ numba-0.60.0/numba/np/math/numbers.py +@@ -397,6 +397,11 @@ def int_abs_impl(context, builder, sig, + return impl_ret_untracked(context, builder, sig.return_type, res) + + ++def identity_impl(context, builder, sig, args): ++ [x] = args ++ return impl_ret_untracked(context, builder, sig.return_type, x) ++ ++ + def uint_abs_impl(context, builder, sig, args): + [x] = args + return impl_ret_untracked(context, builder, sig.return_type, x) +Index: numba-0.60.0/numba/np/npyfuncs.py +=================================================================== +--- numba-0.60.0.orig/numba/np/npyfuncs.py ++++ 
numba-0.60.0/numba/np/npyfuncs.py +@@ -16,6 +16,7 @@ from numba.core import typing, types, er + from numba.core.extending import register_jitable + from numba.np import npdatetime + from numba.np.math import cmathimpl, mathimpl, numbers ++from numba.np.numpy_support import numpy_version + + # some NumPy constants. Note that we could generate some of them using + # the math library, but having the values copied from npy_math seems to +@@ -580,29 +581,42 @@ def np_complex_sign_impl(context, builde + # equivalent to complex sign in NumPy's sign + # but implemented via selects, balancing the 4 cases. + _check_arity_and_homogeneity(sig, args, 1) +- op = args[0] +- ty = sig.args[0] +- float_ty = ty.underlying_float + +- ZERO = context.get_constant(float_ty, 0.0) +- ONE = context.get_constant(float_ty, 1.0) +- MINUS_ONE = context.get_constant(float_ty, -1.0) +- NAN = context.get_constant(float_ty, float('nan')) +- result = context.make_complex(builder, ty) +- result.real = ZERO +- result.imag = ZERO +- +- cmp_sig = typing.signature(types.boolean, *[ty] * 2) +- cmp_args = [op, result._getvalue()] +- arg1_ge_arg2 = np_complex_ge_impl(context, builder, cmp_sig, cmp_args) +- arg1_eq_arg2 = np_complex_eq_impl(context, builder, cmp_sig, cmp_args) +- arg1_lt_arg2 = np_complex_lt_impl(context, builder, cmp_sig, cmp_args) +- +- real_when_ge = builder.select(arg1_eq_arg2, ZERO, ONE) +- real_when_nge = builder.select(arg1_lt_arg2, MINUS_ONE, NAN) +- result.real = builder.select(arg1_ge_arg2, real_when_ge, real_when_nge) ++ if numpy_version >= (2, 0): ++ # NumPy >= 2.0.0 ++ def complex_sign(z): ++ abs = math.hypot(z.real, z.imag) ++ if abs == 0: ++ return 0 + 0j ++ else: ++ return z / abs ++ ++ res = context.compile_internal(builder, complex_sign, sig, args) ++ return impl_ret_untracked(context, builder, sig.return_type, res) ++ else: ++ op = args[0] ++ ty = sig.args[0] ++ result = context.make_complex(builder, ty) ++ float_ty = ty.underlying_float ++ ++ ZERO = context.get_constant(float_ty, 0.0) ++ ONE = context.get_constant(float_ty, 1.0) ++ MINUS_ONE = context.get_constant(float_ty, -1.0) ++ NAN = context.get_constant(float_ty, float('nan')) ++ ++ result.real = ZERO ++ result.imag = ZERO ++ cmp_sig = typing.signature(types.boolean, *[ty] * 2) ++ cmp_args = [op, result._getvalue()] ++ arg1_ge_arg2 = np_complex_ge_impl(context, builder, cmp_sig, cmp_args) ++ arg1_eq_arg2 = np_complex_eq_impl(context, builder, cmp_sig, cmp_args) ++ arg1_lt_arg2 = np_complex_lt_impl(context, builder, cmp_sig, cmp_args) ++ ++ real_when_ge = builder.select(arg1_eq_arg2, ZERO, ONE) ++ real_when_nge = builder.select(arg1_lt_arg2, MINUS_ONE, NAN) ++ result.real = builder.select(arg1_ge_arg2, real_when_ge, real_when_nge) + +- return result._getvalue() ++ return result._getvalue() + + + ######################################################################## +Index: numba-0.60.0/numba/np/ufunc_db.py +=================================================================== +--- numba-0.60.0.orig/numba/np/ufunc_db.py ++++ numba-0.60.0/numba/np/ufunc_db.py +@@ -583,16 +583,58 @@ def _fill_ufunc_db(ufunc_db): + 'f->f': npyfuncs.np_real_floor_impl, + 'd->d': npyfuncs.np_real_floor_impl, + } ++ if numpy_version >= (2, 1): ++ ufunc_db[np.floor].update({ ++ '?->?': numbers.identity_impl, ++ 'b->b': numbers.identity_impl, ++ 'B->B': numbers.identity_impl, ++ 'h->h': numbers.identity_impl, ++ 'H->H': numbers.identity_impl, ++ 'i->i': numbers.identity_impl, ++ 'I->I': numbers.identity_impl, ++ 'l->l': numbers.identity_impl, ++ 'L->L': 
numbers.identity_impl, ++ 'q->q': numbers.identity_impl, ++ 'Q->Q': numbers.identity_impl, ++ }) + + ufunc_db[np.ceil] = { + 'f->f': npyfuncs.np_real_ceil_impl, + 'd->d': npyfuncs.np_real_ceil_impl, + } ++ if numpy_version >= (2, 1): ++ ufunc_db[np.ceil].update({ ++ '?->?': numbers.identity_impl, ++ 'b->b': numbers.identity_impl, ++ 'B->B': numbers.identity_impl, ++ 'h->h': numbers.identity_impl, ++ 'H->H': numbers.identity_impl, ++ 'i->i': numbers.identity_impl, ++ 'I->I': numbers.identity_impl, ++ 'l->l': numbers.identity_impl, ++ 'L->L': numbers.identity_impl, ++ 'q->q': numbers.identity_impl, ++ 'Q->Q': numbers.identity_impl, ++ }) + + ufunc_db[np.trunc] = { + 'f->f': npyfuncs.np_real_trunc_impl, + 'd->d': npyfuncs.np_real_trunc_impl, + } ++ if numpy_version >= (2, 1): ++ ufunc_db[np.trunc].update({ ++ '?->?': numbers.identity_impl, ++ 'b->b': numbers.identity_impl, ++ 'B->B': numbers.identity_impl, ++ 'h->h': numbers.identity_impl, ++ 'H->H': numbers.identity_impl, ++ 'i->i': numbers.identity_impl, ++ 'I->I': numbers.identity_impl, ++ 'l->l': numbers.identity_impl, ++ 'L->L': numbers.identity_impl, ++ 'q->q': numbers.identity_impl, ++ 'Q->Q': numbers.identity_impl, ++ }) + + ufunc_db[np.fabs] = { + 'f->f': npyfuncs.np_real_fabs_impl, +Index: numba-0.60.0/numba/tests/test_array_methods.py +=================================================================== +--- numba-0.60.0.orig/numba/tests/test_array_methods.py ++++ numba-0.60.0/numba/tests/test_array_methods.py +@@ -774,13 +774,20 @@ class TestArrayMethods(MemoryLeakMixin, + check_arr(arr.reshape((2, 3, 4))) + check_arr(arr.reshape((2, 3, 4)).T) + check_arr(arr.reshape((2, 3, 4))[::2]) +- for v in (0.0, 1.5, float('nan')): +- arr = np.array([v]).reshape(()) +- check_arr(arr) + + arr = np.array(["Hello", "", "world"]) + check_arr(arr) + ++ for v in (0.0, 1.5, float('nan')): ++ arr = np.array([v]).reshape(()) ++ if numpy_version < (2, 1): ++ check_arr(arr) ++ else: ++ with self.assertRaises(ValueError) as raises: ++ njit((typeof(arr),))(pyfunc) ++ self.assertEqual(str(raises.exception), ++ "Calling nonzero on 0d arrays is not allowed. Use np.atleast_1d(scalar).nonzero() instead.") ++ + def test_array_nonzero(self): + self.check_nonzero(array_nonzero) + +Index: numba-0.60.0/docs/upcoming_changes/9741.highlight.rst +=================================================================== +--- /dev/null ++++ numba-0.60.0/docs/upcoming_changes/9741.highlight.rst +@@ -0,0 +1,4 @@ ++Added Support for NumPy 2.1 ++--------------------------- ++ ++This release adds support for NumPy 2.1 (excluding the NEP-050 semantics). 
+Index: numba-0.60.0/numba/tests/test_ufuncs.py +=================================================================== +--- numba-0.60.0.orig/numba/tests/test_ufuncs.py ++++ numba-0.60.0/numba/tests/test_ufuncs.py +@@ -18,7 +18,6 @@ from numba.np import numpy_support + from numba.core.registry import cpu_target + from numba.core.base import BaseContext + from numba.np import ufunc_db +-from numba.tests.support import expected_failure_np2 + + is32bits = tuple.__itemsize__ == 4 + iswindows = sys.platform.startswith('win32') +@@ -1696,8 +1695,6 @@ class TestLoopTypesComplex(_LoopTypesTes + + + TestLoopTypesComplex.autogenerate() +-expected_failure_np2(TestLoopTypesComplex.test_sign_F_F) +-expected_failure_np2(TestLoopTypesComplex.test_sign_D_D) + + + class TestLoopTypesDatetime(_LoopTypesTester): +Index: numba-0.60.0/numba/core/typing/arraydecl.py +=================================================================== +--- numba-0.60.0.orig/numba/core/typing/arraydecl.py ++++ numba-0.60.0/numba/core/typing/arraydecl.py +@@ -415,6 +415,11 @@ class ArrayAttribute(AttributeTemplate): + def resolve_nonzero(self, ary, args, kws): + assert not args + assert not kws ++ if ary.ndim == 0 and numpy_version >= (2, 1): ++ raise ValueError( ++ "Calling nonzero on 0d arrays is not allowed." ++ " Use np.atleast_1d(scalar).nonzero() instead." ++ ) + # 0-dim arrays return one result array + ndim = max(ary.ndim, 1) + retty = types.UniTuple(types.Array(types.intp, 1, 'C'), ndim) +Index: numba-0.60.0/numba/np/random/_constants.py +=================================================================== +--- numba-0.60.0.orig/numba/np/random/_constants.py ++++ numba-0.60.0/numba/np/random/_constants.py +@@ -1,4 +1,5 @@ + import numpy as np ++import ctypes + + # These constants are directly obtained from: + # https://github.com/numpy/numpy/blob/caccd283941b0bade7b71056138ded5379b1625f/numpy/random/src/distributions/ziggurat_constants.h +@@ -1222,6 +1223,7 @@ UINT8_MAX = 255 + UINT16_MAX = 65535 + UINT32_MAX = 4294967295 + UINT64_MAX = 18446744073709551615 ++LONG_MAX = (1 << ( 8 * ctypes.sizeof(ctypes.c_long) - 1)) - 1 + + LS2PI = 0.91893853320467267 + TWELFTH = 0.083333333333333333333333 +Index: numba-0.60.0/numba/__init__.py +=================================================================== +--- numba-0.60.0.orig/numba/__init__.py ++++ numba-0.60.0/numba/__init__.py +@@ -34,13 +34,13 @@ def _ensure_critical_deps(): + import numpy as np + numpy_version = extract_version(np) + +- if numpy_version < (1, 22): +- msg = (f"Numba needs NumPy 1.22 or greater. Got NumPy " ++ if numpy_version < (1, 24): ++ msg = (f"Numba needs NumPy 1.24 or greater. Got NumPy " + f"{numpy_version[0]}.{numpy_version[1]}.") + raise ImportError(msg) + +- if numpy_version > (2, 0): +- msg = (f"Numba needs NumPy 2.0 or less. Got NumPy " ++ if numpy_version > (2, 1): ++ msg = (f"Numba needs NumPy 2.1 or less. 
Got NumPy " + f"{numpy_version[0]}.{numpy_version[1]}.") + raise ImportError(msg) + +Index: numba-0.60.0/numba/np/random/distributions.py +=================================================================== +--- a/numba/np/random/distributions.py ++++ b/numba/np/random/distributions.py +@@ -394,8 +394,10 @@ def random_geometric(bitgen, p): + def random_zipf(bitgen, a): + am1 = a - 1.0 + b = pow(2.0, am1) ++ Umin = pow(INT64_MAX, -am1) + while 1: +- U = 1.0 - next_double(bitgen) ++ U01 = next_double(bitgen) ++ U = U01*Umin + (1 - U01) + V = next_double(bitgen) + X = np.floor(pow(U, -1.0 / am1)) + if (X > INT64_MAX or X < 1.0): diff --git a/python-numba.changes b/python-numba.changes new file mode 100644 index 0000000..9395517 --- /dev/null +++ b/python-numba.changes @@ -0,0 +1,1534 @@ +------------------------------------------------------------------- +Tue Oct 29 20:01:54 UTC 2024 - Dirk Müller + +- skip python313 + +------------------------------------------------------------------- +Mon Oct 21 13:14:22 UTC 2024 - Markéta Machová + +- Add upstream patch numpy21.patch to enable support for NumPy 2.1 + +------------------------------------------------------------------- +Mon Jul 1 05:10:03 UTC 2024 - Steve Kowalik + +- Update to 0.60.0: + * NumPy 2.0 Binary Support + * New Features + + IEnhance guvectorize support in JIT code + + IAdd experimental support for ufunc.at + + IAdd float() ctor + + IAdd support for math.log2. + + IAdd math.nextafter support for nopython mode. + + IAdd support for parfor binop reductions. + * Improvements + + Expand isinstance() support for NumPy datetime types + + Python 3.12 sys.monitoring support is added to Numba's dispatcher. + * NumPy Support + + Added support for np.size() + * CUDA API Changes + + Support for compilation to LTO-IR + + Support math.log, math.log2 and math.log10 in CUDA + * Bug Fixes + + Fix parfor variable hoisting analysis. + +------------------------------------------------------------------- +Tue May 28 09:30:26 UTC 2024 - Daniel Garcia + +- Skip broken test on ppc64le + bsc#1225394, gh#numba/numba#8489 + +------------------------------------------------------------------- +Fri Mar 22 20:05:25 UTC 2024 - Dirk Müller + +- update to 0.59.1: + * Fixed caching of kernels that use target-specific overloads + * Fixed a performance regression introduced in Numba 0.59 which + made ``np.searchsorted`` considerably slower. + * This patch fixes two issues with ``np.searchsorted``. First, + a regression is fixed in the support of ``np.datetime64``. + Second, adopt ``NAT``-aware comparisons to fix mishandling + of ``NAT`` value. 
+ * Allow use of Python 3.12 PEP-695 type parameter syntax + +------------------------------------------------------------------- +Fri Mar 8 15:37:58 UTC 2024 - Ben Greiner + +- Stop testing python39: dropped since ipython 8.19 + +------------------------------------------------------------------- +Wed Feb 21 15:35:47 UTC 2024 - Ben Greiner + +- Simplify test flavor logic +- Prepare for python39 flavor drop: Exclude build in empty test + flavors +- Don't test on 32bit-platforms + +------------------------------------------------------------------- +Sat Feb 3 07:04:27 UTC 2024 - Dirk Müller + +- update to 0.59.0 + * Python 3.12 support + * minimum supported version to 3.9 + * Add support for ufunc attributes and reduce + * Add a config variable to enable / disable the llvmlite memory + manager + * see https://numba.readthedocs.io/en/stable/release/0.59.0-notes.html#highlights + +------------------------------------------------------------------- +Mon Nov 20 12:15:07 UTC 2023 - Markéta Machová + +- Update to 0.58.1 + * Added towncrier + * The minimum supported NumPy version is 1.22. + * Add support for NumPy 1.26 + * Remove NVVM 3.4 and CTK 11.0 / 11.1 support + * Removal of Windows 32-bit Support + * The minimum llvmlite version is now 0.41.0. + * Added RVSDG-frontend +- Drop merged patches: + * numba-pr9105-np1.25.patch + * multiprocessing-context.patch + +------------------------------------------------------------------- +Tue Sep 19 12:08:03 UTC 2023 - Markéta Machová + +- Add multiprocessing-context.patch fixing tests for Python 3.11.5 + +------------------------------------------------------------------- +Mon Aug 21 19:53:19 UTC 2023 - Ben Greiner + +- Add numba-pr9105-np1.25.patch, raise (reintroduced) numpy pin + * gh#numba/numba#9105 + * Adapted gh#numba/numba#9138 + +------------------------------------------------------------------- +Mon Aug 14 06:47:15 UTC 2023 - Dirk Müller + +- update to 0.57.1: + * fix regressions with 0.57.0 +- remove upper bound on numpy - upstream does not have it either + +------------------------------------------------------------------- +Fri May 26 13:28:26 UTC 2023 - Steve Kowalik + +- Update to 0.57.0: + * Support for Python 3.11 (minimum is moved to 3.8) + * Support for NumPy 1.24 (minimum is moved to 1.21) + * Python language support enhancements: + + Exception classes now support arguments that are not compile time + constant. + + The built-in functions hasattr and getattr are supported for compile + time constant attributes. + + The built-in functions str and repr are now implemented similarly to + their Python implementations. Custom __str__ and __repr__ functions + can be associated with types and work as expected. + + Numba’s unicode functionality in str.startswith now supports kwargs + start and end. + + min and max now support boolean types. + + Support is added for the dict(iterable) constructor. +- Dropped patches: + * numba-pr8620-np1.24.patch + * update-tbb-backend-calls-2021.6.patch +- Rebased existing patch. + +------------------------------------------------------------------- +Wed Apr 12 05:53:24 UTC 2023 - Steve Kowalik + +- Clean up leftover Python 3.8 gubbins, look forward to Python 3.11 support. + +------------------------------------------------------------------- +Tue Apr 11 08:30:00 UTC 2023 - Dominique Leuenberger + +- Remove test-py38 flavor from multibuild: Python 3.8 is no longer + supported. 
+ +------------------------------------------------------------------- +Tue Jan 3 12:13:00 UTC 2023 - Ben Greiner + +- Split out python flavors into testing multibuilds. Depending on + the obs worker, the test suite can take almost an hour per + flavor. +- Replace allow-numpy-1.24.patch with an updated + numba-pr8620-np1.24.patch to also work with still present numpy + 1.23 in Factory (discussed upstream in gh#numba/numba#8620) +- Merge fix-cli-test.patch into skip-failing-tests.patch + +------------------------------------------------------------------- +Mon Jan 2 21:27:24 UTC 2023 - Ben Greiner + +- Clean up the specfile + * restore the multibuild + * Patch allow-numpy-1.24.patch is the WIP gh#numba/numba#8620 + +------------------------------------------------------------------- +Sun Jan 1 11:41:11 UTC 2023 - Matej Cepl + +- Update to 0.56.4: + - This is a bugfix release to fix a regression in the CUDA + target in relation to the .view() method on CUDA device + arrays that is present when using NumPy version 1.23.0 or + later. + - This is a bugfix release to remove the version restriction + applied to the setuptools package and to fix a bug in the + CUDA target in relation to copying zero length device arrays + to zero length host arrays. +- Add allow-numpy-1.24.patch to allow work with numpy 1.24 + +------------------------------------------------------------------- +Mon Oct 10 10:07:52 UTC 2022 - John Vandenberg + +- Allow numpy 1.23 + +------------------------------------------------------------------- +Mon Oct 3 12:02:05 UTC 2022 - Daniel Garcia + +- Update to 0.56.2 + This release continues to add new features, bug fixes and stability + improvements to Numba. Please note that this will be the last release that + has support for Python 3.7 as the next release series (Numba 0.57) will + support Python 3.11! Also note that, this will be the last release to support + linux-32 packages produced by the Numba team. + +- Remove fix-max-name-size.patch, it's included in the new version. +- Add update-tbb-backend-calls-2021.6.patch to make it compatible with the + latest tbb-devel version. +- Add fix-cli-test.patch to disable one test that fails with OBS. + +------------------------------------------------------------------- +Mon Jul 11 16:05:33 UTC 2022 - Ben Greiner + +- Update to 0.55.2 + * This is a maintenance release to support NumPy 1.22 and Apple + M1. + * Backport #8027: Support for NumPy 1.22 + * update max NumPy for 0.55.2 + * Backport #8052 Ensure pthread is linked in when building for + ppc64le. + * Backport #8102 to fix numpy requirements + * Backport #8109 Pin TBB support with respect to incompatible + 2021.6 API. + +------------------------------------------------------------------- +Sat Jan 29 13:23:43 UTC 2022 - Ben Greiner + +- Update to 0.55.1 + * This is a bugfix release that closes all the remaining issues + from the accelerated release of 0.55.0 and also any release + critical regressions discovered since then. + * CUDA target deprecation notices: + - Support for CUDA toolkits < 10.2 is deprecated and will be + removed in Numba 0.56. + - Support for devices with Compute Capability < 5.3 is + deprecated and will be removed in Numba 0.56. 
+- Drop numba-pr7748-random32bitwidth.patch +- Explicitly declare supported platforms (avoid failing tests on + ppc64) + +------------------------------------------------------------------- +Fri Jan 14 16:55:37 UTC 2022 - Ben Greiner + +- Update to 0.55.0 + * This release includes a significant number important dependency + upgrades along with a number of new features and bug fixes. + * NOTE: Due to NumPy CVE-2021-33430 this release has bypassed the + usual release process so as to promptly provide a Numba release + that supports NumPy 1.21. A single release candidate (RC1) was + made and a few issues were reported, these are summarised as + follows and will be fixed in a subsequent 0.55.1 release. + * Known issues with this release: + - Incorrect result copying array-typed field of structured + array (#7693) + - Two issues in DebugInfo generation (#7726, #7730) + - Compilation failure for hash of floating point values on 32 + bit Windows when using Python 3.10 (#7713). + * Support for Python 3.10 + * Support for NumPy 1.21 + * The minimum supported NumPy version is raised to 1.18 for + runtime (compilation however remains compatible with NumPy + 1.11). + * Experimental support for isinstance. + * The following functions are now supported: + - np.broadcast_to + - np.float_power + - np.cbrt + - np.logspace + - np.take_along_axis + - np.average + - np.argmin gains support for the axis kwarg. + - np.ndarray.astype gains support for types expressed as + literal strings. + * For users of the Numba extension API, Numba now has a new error + handling mode whereby it will treat all exceptions that do not + inherit from numba.errors.NumbaException as a “hard error” and + immediately unwind the stack. This makes it much easier to + debug when writing @overloads etc from the extension API as + there’s now no confusion between Python errors and Numba + errors. This feature can be enabled by setting the environment + variable: NUMBA_CAPTURED_ERRORS='new_style'. + * The threading layer selection priority can now be changed via + the environment variable NUMBA_THREADING_LAYER_PRIORITY. + * Support for NVIDIA’s CUDA Python bindings. + * Support for 16-bit floating point numbers and their basic + operations via intrinsics. + * Streams are provided in the Stream.async_done result, making it + easier to implement asynchronous work queues. + * Support for structured types in device arrays, character + sequences in NumPy arrays, and some array operations on nested + arrays. + * Much underlying refactoring to align the CUDA target more + closely with the CPU target, which lays the groudwork for + supporting the high level extension API in CUDA in future + releases. + * Intel also kindly sponsored research and development into + native debug (DWARF) support and handling per-function + compilation flags: + * Line number/location tracking is much improved. + * Numba’s internal representation of containers (e.g. tuples, + arrays) are now encoded as structures. + * Numba’s per-function compilation flags are encoded into the ABI + field of the mangled name of the function such that it’s + possible to compile and differentiate between versions of the + same function with different flags set. + * There are no new general deprecations. + * There are no new CUDA target deprecations. 
+- Drop numba-pr7483-numpy1_21.patch +- Add numba-pr7748-random32bitwidth.patch -- gh#numba/numba#7748 + +------------------------------------------------------------------- +Sat Jan 8 22:19:07 UTC 2022 - Ben Greiner + +- Numba <0.55 is not compatible with Python 3.10 or NumPy 1.22 + gh#numba/numba#7557 +- Add test skip to numba-pr7483-numpy1_21.patch due to numpy update + gh#numpy/numpy#20376 + +------------------------------------------------------------------- +Thu Nov 18 18:42:21 UTC 2021 - Ben Greiner + +- Update to 0.54.1 + * This is a bugfix release for 0.54.0. It fixes a regression in + structured array type handling, a potential leak on + initialization failure in the CUDA target, a regression caused + by Numba’s vendored cloudpickle module resetting dynamic + classes and a few minor testing/infrastructure related + problems. +- Release summary for 0.54.0 + * This release includes a significant number of new features, + important refactoring, critical bug fixes and a number of + dependency upgrades. + * Python language support enhancements: + - Basic support for f-strings. + - dict comprehensions are now supported. + - The sum built-in function is implemented. + * NumPy features/enhancements, The following functions are now + supported: + - np.clip + - np.iscomplex + - np.iscomplexobj + - np.isneginf + - np.isposinf + - np.isreal + - np.isrealobj + - np.isscalar + - np.random.dirichlet + - np.rot90 + - np.swapaxes + * Also np.argmax has gained support for the axis keyword argument + and it’s now possible to use 0d NumPy arrays as scalars in + __setitem__ calls. + + Internal changes: + * Debugging support through DWARF has been fixed and enhanced. + * Numba now optimises the way in which locals are emitted to help + reduce time spend in LLVM’s SROA passes. + + CUDA target changes: + * Support for emitting lineinfo to be consumed by profiling tools + such as Nsight Compute + * Improved fastmath code generation for various trig, division, + and other functions + * Faster compilation using lazy addition of libdevice to compiled + units + * Support for IPC on Windows + * Support for passing tuples to CUDA ufuncs + * Performance warnings: + - When making implicit copies by calling a kernel on arrays in + host memory + - When occupancy is poor due to kernel or ufunc/gufunc + configuration + * Support for implementing warp-aggregated intrinsics: + - Using support for more CUDA functions: activemask(), + lanemask_lt() + - The ffs() function now works correctly! + * Support for @overload in the CUDA target + + Intel kindly sponsored research and development that lead to a + number of new features and internal support changes: + * Dispatchers can now be retargetted to a new target via a user + defined context manager. + * Support for custom NumPy array subclasses has been added + (including an overloadable memory allocator). + * An inheritance based model for targets that permits targets to + share @overload implementations. + * Per function compiler flags with inheritance behaviours. + * The extension API now has support for overloading class methods + via the @overload_classmethod decorator. + + Deprecations: + * The ROCm target (for AMD ROC GPUs) has been moved to an + “unmaintained” status and a seperate repository stub has been + created for it at: https://github.com/numba/numba-rocm + + CUDA target deprecations and breaking changes: + * Relaxed strides checking is now the default when computing the + contiguity of device arrays. + * The inspect_ptx() method is deprecated. 
For use cases that + obtain PTX for further compilation outside of Numba, use + compile_ptx() instead. + * Eager compilation of device functions (the case when + device=True and a signature is provided) is deprecated. + + Version support/dependency changes: + * LLVM 11 is now supported on all platforms via llvmlite. + * The minimum supported Python version is raised to 3.7. + * NumPy version 1.20 is supported. + * The minimum supported NumPy version is raised to 1.17 for + runtime (compilation however remains compatible with NumPy + 1.11). + * Vendor cloudpickle v1.6.0 – now used for all pickle operations. + * TBB >= 2021 is now supported and all prior versions are + unsupported (not easily possible to maintain the ABI breaking + changes). +- Full release notes; + https://numba.readthedocs.io/en/0.54.1/release-notes.html +- Drop patches merged upstream: + * packaging-ignore-setuptools-deprecation.patch + * numba-pr6851-llvm-timings.patch +- Refresh skip-failing-tests.patch, fix-max-name-size.patch +- Add numba-pr7483-numpy1_21.patch gh#numba/numba#7176, + gh#numba/numba#7483 + +------------------------------------------------------------------- +Wed Mar 17 16:51:46 UTC 2021 - Ben Greiner + +- Update to 0.53.0 + * Support for Python 3.9 + * Function sub-typing + * Initial support for dynamic gufuncs (i.e. from @guvectorize) + * Parallel Accelerator (@njit(parallel=True) now supports + Fortran ordered arrays + * Full release notes at + https://numba.readthedocs.io/en/0.53.0/release-notes.html +- Don't unpin-llvmlite.patch. It really need to be the correct + version. +- Refresh skip-failing-tests.patch +- Add packaging-ignore-setuptools-deprecation.patch + gh#numba/numba#6837 +- Add numba-pr6851-llvm-timings.patch gh#numba/numba#6851 in order + to fix 32-bit issues gh#numba/numba#6832 + +------------------------------------------------------------------- +Wed Feb 17 09:49:48 UTC 2021 - Ben Greiner + +- Update to 0.52.0 + https://numba.readthedocs.io/en/stable/release-notes.html + This release focuses on performance improvements, but also adds + some new features and contains numerous bug fixes and stability + improvements. + Highlights of core performance improvements include: + * Intel kindly sponsored research and development into producing + a new reference count pruning pass. This pass operates at the + LLVM level and can prune a number of common reference counting + patterns. This will improve performance for two primary + reasons: + - There will be less pressure on the atomic locks used to do + the reference counting. + - Removal of reference counting operations permits more + inlining and the optimisation passes can in general do more + with what is present. + (Siu Kwan Lam). + * Intel also sponsored work to improve the performance of the + numba.typed.List container, particularly in the case of + __getitem__ and iteration (Stuart Archibald). + * Superword-level parallelism vectorization is now switched on + and the optimisation pipeline has been lightly analysed and + tuned so as to be able to vectorize more and more often + (Stuart Archibald). + Highlights of core feature changes include: + * The inspect_cfg method on the JIT dispatcher object has been + significantly enhanced and now includes highlighted output and + interleaved line markers and Python source (Stuart Archibald). + * The BSD operating system is now unofficially supported (Stuart + Archibald). 
+ * Numerous features/functionality improvements to NumPy support, + including support for: + - np.asfarray (Guilherme Leobas) + - “subtyping” in record arrays (Lucio Fernandez-Arjona) + - np.split and np.array_split (Isaac Virshup) + - operator.contains with ndarray (@mugoh). + - np.asarray_chkfinite (Rishabh Varshney). + - NumPy 1.19 (Stuart Archibald). + - the ndarray allocators, empty, ones and zeros, accepting a + dtype specified as a string literal (Stuart Archibald). + * Booleans are now supported as literal types (Alexey Kozlov). + * On the CUDA target: + * CUDA 9.0 is now the minimum supported version (Graham Markall). + * Support for Unified Memory has been added (Max Katz). + * Kernel launch overhead is reduced (Graham Markall). + * Cudasim support for mapped array, memcopies and memset has + been * added (Mike Williams). + * Access has been wired in to all libdevice functions (Graham + Markall). + * Additional CUDA atomic operations have been added (Michae + Collison). + * Additional math library functions (frexp, ldexp, isfinite) + (Zhihao * Yuan). + * Support for power on complex numbers (Graham Markall). + Deprecations to note: + * There are no new deprecations. However, note that + “compatibility” mode, which was added some 40 releases ago to + help transition from 0.11 to 0.12+, has been removed! Also, + the shim to permit the import of jitclass from Numba’s top + level namespace has now been removed as per the deprecation + schedule. +- NEP 29: Skip python36 build. Python 3.6 is dropped by NumPy 1.20 + +------------------------------------------------------------------- +Mon Nov 2 16:34:48 UTC 2020 - Marketa Calabkova + +- Update to 0.51.2 + * The compilation chain is now based on LLVM 10 (Valentin Haenel). + * Numba has internally switched to prefer non-literal types over literal ones so + as to reduce function over-specialisation, this with view of speeding up + compile times (Siu Kwan Lam). + * On the CUDA target: Support for CUDA Toolkit 11, Ampere, and Compute + Capability 8.0; Printing of ``SASS`` code for kernels; Callbacks to Python + functions can be inserted into CUDA streams, and streams are async awaitable; + Atomic ``nanmin`` and ``nanmax`` functions are added; Fixes for various + miscompilations and segfaults. (mostly Graham Markall; call backs on + streams by Peter Würtz). + * Support for heterogeneous immutable lists and heterogeneous immutable string + key dictionaries. Also optional initial/construction value capturing for all + lists and dictionaries containing literal values (Stuart Archibald). + * A new pass-by-reference mutable structure extension type ``StructRef`` (Siu + Kwan Lam). + * Object mode blocks are now cacheable, with the side effect of numerous bug + fixes and performance improvements in caching. This also permits caching of + functions defined in closures (Siu Kwan Lam). + * The error handling and reporting system has been improved to reduce the size + of error messages, and also improve quality and specificity. + * The CUDA target has more stream constructors available and a new function for + compiling to PTX without linking and loading the code to a device. Further, + the macro-based system for describing CUDA threads and blocks has been + replaced with standard typing and lowering implementations, for improved + debugging and extensibility. 
+- Better unpin llvmlite with unpin-llvmlite.patch to avoid breakages + +------------------------------------------------------------------- +Wed May 27 07:24:32 UTC 2020 - pgajdos@suse.com + +- version update to 0.49.1 + * PR #5587: Fixed #5586 Threading Implementation Typos + * PR #5592: Fixes #5583 Remove references to cffi_support from docs and examples + * PR #5614: Fix invalid type in resolve for comparison expr in parfors. + * PR #5624: Fix erroneous rewrite of predicate to bit const on prune. + * PR #5627: Fixes #5623, SSA local def scan based on invalid equality + assumption. + * PR #5629: Fixes naming error in array_exprs + * PR #5630: Fix #5570. Incorrect race variable detection due to SSA naming. + * PR #5638: Make literal_unroll function work as a freevar. + * PR #5648: Unset the memory manager after EMM Plugin tests + * PR #5651: Fix some SSA issues + * PR #5652: Pin to sphinx=2.4.4 to avoid problem with C declaration + * PR #5658: Fix unifying undefined first class function types issue + * PR #5669: Update example in 5m guide WRT SSA type stability. + * PR #5676: Restore ``numba.types`` as public API + +------------------------------------------------------------------- +Fri Apr 24 14:07:35 UTC 2020 - Marketa Calabkova + +- Update to 0.49.0 + * Removal of all Python 2 related code and also updating the minimum supported + Python version to 3.6, the minimum supported NumPy version to 1.15 and the + minimum supported SciPy version to 1.0. (Stuart Archibald). + * Refactoring of the Numba code base. The code is now organised into submodules + by functionality. This cleans up Numba's top level namespace. + (Stuart Archibald). + * Introduction of an ``ir.Del`` free static single assignment form for Numba's + intermediate representation (Siu Kwan Lam and Stuart Archibald). + * An OpenMP-like thread masking API has been added for use with code using the + parallel CPU backends (Aaron Meurer and Stuart Archibald). + * For the CUDA target, all kernel launches now require a configuration, this + preventing accidental launches of kernels with the old default of a single + thread in a single block. The hard-coded autotuner is also now removed, such + tuning is deferred to CUDA API calls that provide the same functionality + (Graham Markall). + * The CUDA target also gained an External Memory Management plugin interface to + allow Numba to use another CUDA-aware library for all memory allocations and + deallocations (Graham Markall). + * The Numba Typed List container gained support for construction from iterables + (Valentin Haenel). + * Experimental support was added for first-class function types + (Pearu Peterson). 
+- Refreshed patch skip-failing-tests.patch + * the troublesome tests are skipped upstream on 32-bit +- Unpin llvmlite + +------------------------------------------------------------------- +Mon Apr 6 07:56:16 UTC 2020 - Tomáš Chvátal + +- Switch to multibuilt as the tests take ages to build and we + could speed things up in 2 loops + +------------------------------------------------------------------- +Fri Feb 21 09:39:07 UTC 2020 - Tomáš Chvátal + +- Update to 0.48.0: + * Many fixes for llvm/cuda updates; see CHANGE_LOG for details + * Drop python2 support +- Add one more failing test to skip: + * skip-failing-tests.patch + +------------------------------------------------------------------- +Tue Dec 17 23:28:40 CET 2019 - Matej Cepl + +- Clean up SPEC file (mostly just testing new python-llvmlite + package) + +------------------------------------------------------------------- +Thu Oct 24 20:55:10 UTC 2019 - Todd R + +- Restore python2 support. + +------------------------------------------------------------------- +Thu Sep 26 08:06:01 UTC 2019 - Tomáš Chvátal + +- Update to 0.46.0: + * Many fixes and changes for llvm/cuda updates + See CHANGE_LOG file for details +- Add fix-max-name-size.patch to fix issue with numba + identifier length on recent LLVM versions. +- Remove test from skip-failing-tests.patch fixed by + fix-max-name-size.patch. The test is important, if it is failing + numba will not work reliably. + +------------------------------------------------------------------- +Thu Sep 26 08:06:01 UTC 2019 - Tomáš Chvátal + +- Update to 0.45.1: + * Many fixes and changes for llvm/cuda updates + See CHANGE_LOG file for details +- Update skip-failing-tests.patch to skip one more failing test + +------------------------------------------------------------------- +Thu Apr 11 21:52:30 CEST 2019 - Matej Cepl + +- Update to 0.43.1, which is a bugfix release. + +------------------------------------------------------------------- +Mon Mar 18 18:05:34 CET 2019 - Matej Cepl + +- Update to 0.43.0: + - Initial support for statically typed dictionaries + - Improvements to `hash()` to match Python 3 behavior + - Support for the heapq module + - Ability to pass C structs to Numba + - More NumPy functions: asarray, trapz, roll, ptp, extract +- Add skip-failing-tests.patch to avoid problems with possibly + incompatible version of NumPy 1.16. + +------------------------------------------------------------------- +Sat Jan 26 17:06:14 UTC 2019 - Arun Persaud + +- specfile: + * update copyright year + +- update to version 0.42.0: + * In this release the major features are: + + The capability to launch and attach the GDB debugger from within + a jitted function. + + The upgrading of LLVM to version 7.0.0. + * We added a draft of the project roadmap to the developer + manual. The roadmap is for informational purposes only as + priorities and resources may change. + * Here are some enhancements from contributed PRs: + + #3532. Daniel Wennberg improved the "cuda.{pinned, mapped}" API + so that the associated memory is released immediately at the + exit of the context manager. + + #3531. Dimitri Vorona enabled the inlining of jitclass methods. + + #3516. Simon Perkins added the support for passing numpy dtypes + (i.e. "np.dtype("int32")") and their type constructor + (i.e. "np.int32") into a jitted function. + + #3509. Rob Ennis added support for "np.corrcoef". + * A regression issue (#3554, #3461) relating to making an empty + slice in parallel mode is resolved by #3558. 
+ * General Enhancements: + + PR #3392: Launch and attach gdb directly from Numba. + + PR #3437: Changes to accommodate LLVM 7.0.x + + PR #3509: Support for np.corrcoef + + PR #3516: Typeof dtype values + + PR #3520: Fix @stencil ignoring cval if out kwarg supplied. + + PR #3531: Fix jitclass method inlining and avoid unnecessary + increfs + + PR #3538: Avoid future C-level assertion error due to invalid + visibility + + PR #3543: Avoid implementation error being hidden by the + try-except + + PR #3544: Add `long_running` test flag and feature to exclude + tests. + + PR #3549: ParallelAccelerator caching improvements + + PR #3558: Fixes array analysis for inplace binary operators. + + PR #3566: Skip alignment tests on armv7l. + + PR #3567: Fix unifying literal types in namedtuple + + PR #3576: Add special copy routine for NumPy out arrays + + PR #3577: Fix example and docs typos for `objmode` context + manager. reorder statements. + + PR #3580: Use alias information when determining whether it is + safe to + + PR #3583: Use `ir.unknown_loc` for unknown `Loc`, as #3390 with + tests + + PR #3587: Fix llvm.memset usage changes in llvm7 + + PR #3596: Fix Array Analysis for Global Namedtuples + + PR #3597: Warn users if threading backend init unsafe. + + PR #3605: Add guard for writing to read only arrays from ufunc + calls + + PR #3606: Improve the accuracy of error message wording for + undefined type. + + PR #3611: gdb test guard needs to ack ptrace permissions + + PR #3616: Skip gdb tests on ARM. + * CUDA Enhancements: + + PR #3532: Unregister temporarily pinned host arrays at once + + PR #3552: Handle broadcast arrays correctly in host->device + transfer. + + PR #3578: Align cuda and cuda simulator kwarg names. + * Documentation Updates: + + PR #3545: Fix @njit description in 5 min guide + + PR #3570: Minor documentation fixes for numba.cuda + + PR #3581: Fixing minor typo in `reference/types.rst` + + PR #3594: Changing `@stencil` docs to correctly reflect + `func_or_mode` param + + PR #3617: Draft roadmap as of Dec 2018 + +------------------------------------------------------------------- +Sat Dec 1 18:34:28 UTC 2018 - Arun Persaud + +- update to version 0.41.0: + * major features: + + Diagnostics showing the optimizations done by + ParallelAccelerator + + Support for profiling Numba-compiled functions in Intel VTune + + Additional NumPy functions: partition, nancumsum, nancumprod, + ediff1d, cov, conj, conjugate, tri, tril, triu + + Initial support for Python 3 Unicode strings + * General Enhancements: + + PR #1968: armv7 support + + PR #2983: invert mapping b/w binop operators and the operator + module #2297 + + PR #3160: First attempt at parallel diagnostics + + PR #3307: Adding NUMBA_ENABLE_PROFILING envvar, enabling jit + event + + PR #3320: Support for np.partition + + PR #3324: Support for np.nancumsum and np.nancumprod + + PR #3325: Add location information to exceptions. + + PR #3337: Support for np.ediff1d + + PR #3345: Support for np.cov + + PR #3348: Support user pipeline class in with lifting + + PR #3363: string support + + PR #3373: Improve error message for empty imprecise lists. + + PR #3375: Enable overload(operator.getitem) + + PR #3402: Support negative indexing in tuple. + + PR #3414: Refactor Const type + + PR #3416: Optimized usage of alloca out of the loop + + PR #3424: Updates for llvmlite 0.26 + + PR #3462: Add support for `np.conj/np.conjugate`. 
+ + PR #3480: np.tri, np.tril, np.triu - default optional args + + PR #3481: Permit dtype argument as sole kwarg in np.eye + * CUDA Enhancements: + + PR #3399: Add max_registers Option to cuda.jit + * Continuous Integration / Testing: + + PR #3303: CI with Azure Pipelines + + PR #3309: Workaround race condition with apt + + PR #3371: Fix issues with Azure Pipelines + + PR #3362: Fix #3360: `RuntimeWarning: 'numba.runtests' found in + sys.modules` + + PR #3374: Disable openmp in wheel building + + PR #3404: Azure Pipelines templates + + PR #3419: Fix cuda tests and error reporting in test discovery + + PR #3491: Prevent faulthandler installation on armv7l + + PR #3493: Fix CUDA test that used negative indexing behaviour + that's fixed. + + PR #3495: Start Flake8 checking of Numba source + * Fixes: + + PR #2950: Fix dispatcher to only consider contiguous-ness. + + PR #3124: Fix 3119, raise for 0d arrays in reductions + + PR #3228: Reduce redundant module linking + + PR #3329: Fix AOT on windows. + + PR #3335: Fix memory management of __cuda_array_interface__ + views. + + PR #3340: Fix typo in error name. + + PR #3365: Fix the default unboxing logic + + PR #3367: Allow non-global reference to objmode() + context-manager + + PR #3381: Fix global reference in objmode for dynamically + created function + + PR #3382: CUDA_ERROR_MISALIGNED_ADDRESS Using Multiple Const + Arrays + + PR #3384: Correctly handle very old versions of colorama + + PR #3394: Add 32bit package guard for non-32bit installs + + PR #3397: Fix with-objmode warning + + PR #3403 Fix label offset in call inline after parfor pass + + PR #3429: Fixes raising of user defined exceptions for + exec(). + + PR #3432: Fix error due to function naming in CI in py2.7 + + PR #3444: Fixed TBB's single thread execution and test added for + #3440 + + PR #3449: Allow matching non-array objects in find_callname() + + PR #3455: Change getiter and iternext to not be pure. Resolves + #3425 + + PR #3467: Make ir.UndefinedType singleton class. + + PR #3478: Fix np.random.shuffle sideeffect + + PR #3487: Raise unsupported for kwargs given to `print()` + + PR #3488: Remove dead script. + + PR #3498: Fix stencil support for boolean as return type + + PR #3511: Fix handling make_function literals (regression of + #3414) + + PR #3514: Add missing unicode != unicode + + PR #3527: Fix complex math sqrt implementation for large -ve + values + + PR #3530: This adds arg an check for the pattern supplied to + Parfors. + + PR #3536: Sets list dtor linkage to `linkonce_odr` to fix + visibility in AOT + * Documentation Updates: + + PR #3316: Update 0.40 changelog with additional PRs + + PR #3318: Tweak spacing to avoid search box wrapping onto second + line + + PR #3321: Add note about memory leaks with exceptions to + docs. Fixes #3263 + + PR #3322: Add FAQ on CUDA + fork issue. Fixes #3315. + + PR #3343: Update docs for argsort, kind kwarg partially + supported. + + PR #3357: Added mention of njit in 5minguide.rst + + PR #3434: Fix parallel reduction example in docs. + + PR #3452: Fix broken link and mark up problem. + + PR #3484: Size Numba logo in docs in em units. Fixes #3313 + + PR #3502: just two typos + + PR #3506: Document string support + + PR #3513: Documentation for parallel diagnostics. + + PR #3526: Fix 5 min guide with respect to @njit decl + +------------------------------------------------------------------- +Fri Oct 26 21:28:50 UTC 2018 - Jan Engelhardt + +- Use noun phrase in summary. 
+ +------------------------------------------------------------------- +Fri Oct 26 19:45:47 UTC 2018 - Todd R + +- Update to Version 0.40.1 + * PR #3338: Accidentally left Anton off contributor list for 0.40.0 + * PR #3374: Disable OpenMP in wheel building + * PR #3376: Update 0.40.1 changelog and docs on OpenMP backend +- Update to Version 0.40.0 + + This release adds a number of major features: + * A new GPU backend: kernels for AMD GPUs can now be compiled using the ROCm + driver on Linux. + * The thread pool implementation used by Numba for automatic multithreading + is configurable to use TBB, OpenMP, or the old "workqueue" implementation. + (TBB is likely to become the preferred default in a future release.) + * New documentation on thread and fork-safety with Numba, along with overall + improvements in thread-safety. + * Experimental support for executing a block of code inside a nopython mode + function in object mode. + * Parallel loops now allow arrays as reduction variables + * CUDA improvements: FMA, faster float64 atomics on supporting hardware, + records in const memory, and improved datatime dtype support + * More NumPy functions: vander, tri, triu, tril, fill_diagonal + + General Enhancements: + * PR #3017: Add facility to support with-contexts + * PR #3033: Add support for multidimensional CFFI arrays + * PR #3122: Add inliner to object mode pipeline + * PR #3127: Support for reductions on arrays. + * PR #3145: Support for np.fill_diagonal + * PR #3151: Keep a queue of references to last N deserialized functions. Fixes #3026 + * PR #3154: Support use of list() if typeable. + * PR #3166: Objmode with-block + * PR #3179: Updates for llvmlite 0.25 + * PR #3181: Support function extension in alias analysis + * PR #3189: Support literal constants in typing of object methods + * PR #3190: Support passing closures as literal values in typing + * PR #3199: Support inferring stencil index as constant in simple unary expressions + * PR #3202: Threading layer backend refactor/rewrite/reinvention! + * PR #3209: Support for np.tri, np.tril and np.triu + * PR #3211: Handle unpacking in building tuple (BUILD_TUPLE_UNPACK opcode) + * PR #3212: Support for np.vander + * PR #3227: Add NumPy 1.15 support + * PR #3272: Add MemInfo_data to runtime._nrt_python.c_helpers + * PR #3273: Refactor. Removing thread-local-storage based context nesting. + * PR #3278: compiler threadsafety lockdown + * PR #3291: Add CPU count and CFS restrictions info to numba -s. + + CUDA Enhancements: + * PR #3152: Use cuda driver api to get best blocksize for best occupancy + * PR #3165: Add FMA intrinsic support + * PR #3172: Use float64 add Atomics, Where Available + * PR #3186: Support Records in CUDA Const Memory + * PR #3191: CUDA: fix log size + * PR #3198: Fix GPU datetime timedelta types usage + * PR #3221: Support datetime/timedelta scalar argument to a CUDA kernel. + * PR #3259: Add DeviceNDArray.view method to reinterpret data as a different type. + * PR #3310: Fix IPC handling of sliced cuda array. + + ROCm Enhancements: + * PR #3023: Support for AMDGCN/ROCm. + * PR #3108: Add ROC info to `numba -s` output. + * PR #3176: Move ROC vectorize init to npyufunc + * PR #3177: Add auto_synchronize support to ROC stream + * PR #3178: Update ROC target documentation. + * PR #3294: Add compiler lock to ROC compilation path. + * PR #3280: Add wavebits property to the HSA Agent. 
+ * PR #3281: Fix ds_permute types and add tests + + Continuous Integration / Testing: + * PR #3091: Remove old recipes, switch to test config based on env var. + * PR #3094: Add higher ULP tolerance for products in complex space. + * PR #3096: Set exit on error in incremental scripts + * PR #3109: Add skip to test needing jinja2 if no jinja2. + * PR #3125: Skip cudasim only tests + * PR #3126: add slack, drop flowdock + * PR #3147: Improve error message for arg type unsupported during typing. + * PR #3128: Fix recipe/build for jetson tx2/ARM + * PR #3167: In build script activate env before installing. + * PR #3180: Add skip to broken test. + * PR #3216: Fix libcuda.so loading in some container setup + * PR #3224: Switch to new Gitter notification webhook URL and encrypt it + * PR #3235: Add 32bit Travis CI jobs + * PR #3257: This adds scipy/ipython back into windows conda test phase. + + Fixes: + * PR #3038: Fix random integer generation to match results from NumPy. + * PR #3045: Fix #3027 - Numba reassigns sys.stdout + * PR #3059: Handler for known LoweringErrors. + * PR #3060: Adjust attribute error for NumPy functions. + * PR #3067: Abort simulator threads on exception in thread block. + * PR #3079: Implement +/-(types.boolean) Fix #2624 + * PR #3080: Compute np.var and np.std correctly for complex types. + * PR #3088: Fix #3066 (array.dtype.type in prange) + * PR #3089: Fix invalid ParallelAccelerator hoisting issue. + * PR #3136: Fix #3135 (lowering error) + * PR #3137: Fix for issue3103 (race condition detection) + * PR #3142: Fix Issue #3139 (parfors reuse of reduction variable across prange blocks) + * PR #3148: Remove dead array equal @infer code + * PR #3153: Fix canonicalize_array_math typing for calls with kw args + * PR #3156: Fixes issue with missing pygments in testing and adds guards. + * PR #3168: Py37 bytes output fix. + * PR #3171: Fix #3146. Fix CFUNCTYPE void* return-type handling + * PR #3193: Fix setitem/getitem resolvers + * PR #3222: Fix #3214. Mishandling of POP_BLOCK in while True loop. + * PR #3230: Fixes liveness analysis issue in looplifting + * PR #3233: Fix return type difference for 32bit ctypes.c_void_p + * PR #3234: Fix types and layout for `np.where`. + * PR #3237: Fix DeprecationWarning about imp module + * PR #3241: Fix #3225. Normalize 0nd array to scalar in typing of indexing code. + * PR #3256: Fix #3251: Move imports of ABCs to collections.abc for Python >= 3.3 + * PR #3292: Fix issue3279. + * PR #3302: Fix error due to mismatching dtype + + Documentation Updates: + * PR #3104: Workaround for #3098 (test_optional_unpack Heisenbug) + * PR #3132: Adds an ~5 minute guide to Numba. + * PR #3194: Fix docs RE: np.random generator fork/thread safety + * PR #3242: Page with Numba talks and tutorial links + * PR #3258: Allow users to choose the type of issue they are reporting. 
+ * PR #3260: Fixed broken link + * PR #3266: Fix cuda pointer ownership problem with user/externally allocated pointer + * PR #3269: Tweak typography with CSS + * PR #3270: Update FAQ for functions passed as arguments + * PR #3274: Update installation instructions + * PR #3275: Note pyobject and voidptr are types in docs + * PR #3288: Do not need to call parallel optimizations "experimental" anymore + * PR #3318: Tweak spacing to avoid search box wrapping onto second line +- Remove upstream-included numba-0.39.0-fix-3135.patch + +------------------------------------------------------------------- +Fri Jul 20 13:09:58 UTC 2018 - mcepl@suse.com + +- Add patch numba-0.39.0-fix-3135.patch to make not fail datashader + tests. (https://github.com/bokeh/datashader/issues/620) + +------------------------------------------------------------------- +Fri Jul 13 09:20:32 UTC 2018 - tchvatal@suse.com + +- Fix version requirement to ask for new llvmlite + +------------------------------------------------------------------- +Thu Jul 12 03:31:08 UTC 2018 - arun@gmx.de + +- update to version 0.39.0: + * Here are the highlights for the Numba 0.39.0 release. + + This is the first version that supports Python 3.7. + + With help from Intel, we have fixed the issues with SVML support + (related issues #2938, #2998, #3006). + + List has gained support for containing reference-counted types + like NumPy arrays and `list`. Note, list still cannot hold + heterogeneous types. + + We have made a significant change to the internal + calling-convention, which should be transparent to most users, + to allow for a future feature that will permitting jumping back + into python-mode from a nopython-mode function. This also fixes + a limitation to `print` that disabled its use from nopython + functions that were deep in the call-stack. + + For CUDA GPU support, we added a `__cuda_array_interface__` + following the NumPy array interface specification to allow Numba + to consume externally defined device arrays. We have opened a + corresponding pull request to CuPy to test out the concept and + be able to use a CuPy GPU array. + + The Numba dispatcher `inspect_types()` method now supports the + kwarg `pretty` which if set to `True` will produce ANSI/HTML + output, showing the annotated types, when invoked from + ipython/jupyter-notebook respectively. + + The NumPy functions `ndarray.dot`, `np.percentile` and + `np.nanpercentile`, and `np.unique` are now supported. + + Numba now supports the use of a per-project configuration file + to permanently set behaviours typically set via `NUMBA_*` family + environment variables. + + Support for the `ppc64le` architecture has been added. + * Enhancements: + + PR #2793: Simplify and remove javascript from html_annotate + templates. + + PR #2840: Support list of refcounted types + + PR #2902: Support for np.unique + + PR #2926: Enable fence for all architecture and add developer + notes + + PR #2928: Making error about untyped list more informative. + + PR #2930: Add configuration file and color schemes. + + PR #2932: Fix encoding to 'UTF-8' in `check_output` decode. + + PR #2938: Python 3.7 compat: _Py_Finalizing becomes + _Py_IsFinalizing() + + PR #2939: Comprehensive SVML unit test + + PR #2946: Add support for `ndarray.dot` method and tests. + + PR #2953: percentile and nanpercentile + + PR #2957: Add new 3.7 opcode support. 
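[Illustrative aside for the 0.39.0 highlight above about the dispatcher's `inspect_types()` gaining a `pretty` kwarg. This is a minimal sketch, not taken from the release notes; the jitted function is made up.]

    from numba import njit

    @njit
    def add(a, b):
        return a + b

    add(1, 2)        # compiles an integer specialization
    add(1.0, 2.5)    # compiles a float specialization

    add.inspect_types()             # plain-text listing of inferred types
    add.inspect_types(pretty=True)  # ANSI/HTML output in IPython/Jupyter

The pretty variant assumes an IPython/Jupyter session with Pygments and Jinja2 available, which is presumably why the spec below recommends python-Pygments and python-Jinja2.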
+ + PR #2963: Improve alias analysis to be more comprehensive + + PR #2984: Support for namedtuples in array analysis + + PR #2986: Fix environment propagation + + PR #2990: Improve function call matching for intrinsics + + PR #3002: Second pass at error rewrites (interpreter errors). + + PR #3004: Add numpy.empty to the list of pure functions. + + PR #3008: Augment SVML detection with llvmlite SVML patch + detection. + + PR #3012: Make use of the common spelling of + heterogeneous/homogeneous. + + PR #3032: Fix pycc ctypes test due to mismatch in + calling-convention + + PR #3039: Add SVML detection to Numba environment diagnostic + tool. + + PR #3041: This adds @needs_blas to tests that use BLAS + + PR #3056: Require llvmlite>=0.24.0 + * CUDA Enhancements: + + PR #2860: __cuda_array_interface__ + + PR #2910: More CUDA intrinsics + + PR #2929: Add Flag To Prevent Unneccessary D->H Copies + + PR #3037: Add CUDA IPC support on non-peer-accessible devices + * CI Enhancements: + + PR #3021: Update appveyor config. + + PR #3040: Add fault handler to all builds + + PR #3042: Add catchsegv + + PR #3077: Adds optional number of processes for `-m` in testing + * Fixes: + + PR #2897: Fix line position of delete statement in numba ir + + PR #2905: Fix for #2862 + + PR #3009: Fix optional type returning in recursive call + + PR #3019: workaround and unittest for issue #3016 + + PR #3035: [TESTING] Attempt delayed removal of Env + + PR #3048: [WIP] Fix cuda tests failure on buildfarm + + PR #3054: Make test work on 32-bit + + PR #3062: Fix cuda.In freeing devary before the kernel launch + + PR #3073: Workaround #3072 + + PR #3076: Avoid ignored exception due to missing globals at + interpreter teardown + * Documentation Updates: + + PR #2966: Fix syntax in env var docs. + + PR #2967: Fix typo in CUDA kernel layout example. + + PR #2970: Fix docstring copy paste error. + +------------------------------------------------------------------- +Sun Jun 24 01:05:37 UTC 2018 - arun@gmx.de + +- update to version 0.38.1: + This is a critical bug fix release addressing: + https://github.com/numba/numba/issues/3006 + + The bug does not impact users using conda packages from Anaconda or Intel Python + Distribution (but it does impact conda-forge). It does not impact users of pip + using wheels from PyPI. + + This only impacts a small number of users where: + + * The ICC runtime (specifically libsvml) is present in the user's environment. + * The user is using an llvmlite statically linked against a version of LLVM + that has not been patched with SVML support. + * The platform is 64-bit. + + The release fixes a code generation path that could lead to the production of + incorrect results under the above situation. + + Fixes: + * PR #3007: Augment SVML detection with llvmlite SVML patch + detection. + +------------------------------------------------------------------- +Fri May 18 08:06:59 UTC 2018 - tchvatal@suse.com + +- Fix dependencies to match reality +- Add more items to make python2 build + +------------------------------------------------------------------- +Sat May 12 16:21:24 UTC 2018 - arun@gmx.de + +- update to version 0.38.0: + * highlights: + + Numba (via llvmlite) is now backed by LLVM 6.0, general + vectorization is improved as a result. A significant long + standing LLVM bug that was causing corruption was also found and + fixed. + + Further considerable improvements in vectorization are made + available as Numba now supports Intel's short vector math + library (SVML). 
Try it out with `conda install -c numba + icc_rt`. + + CUDA 8.0 is now the minimum supported CUDA version. + * Other highlights include: + + Bug fixes to `parallel=True` have enabled more vectorization + opportunities when using the ParallelAccelerator technology. + + Much effort has gone into improving error reporting and the + general usability of Numba. This includes highlighted error + messages and performance tips documentation. Try it out with + `conda install colorama`. + + A number of new NumPy functions are supported, `np.convolve`, + `np.correlate` `np.reshape`, `np.transpose`, `np.permutation`, + `np.real`, `np.imag`, and `np.searchsorted` now supports + the`side` kwarg. Further, `np.argsort` now supports the `kind` + kwarg with `quicksort` and `mergesort` available. + + The Numba extension API has gained the ability operate more + easily with functions from Cython modules through the use of + `numba.extending.get_cython_function_address` to obtain function + addresses for direct use in `ctypes.CFUNCTYPE`. + + Numba now allows the passing of jitted functions (and containers + of jitted functions) as arguments to other jitted functions. + + The CUDA functionality has gained support for a larger selection + of bit manipulation intrinsics, also SELP, and has had a number + of bugs fixed. + + Initial work to support the PPC64LE platform has been added, + full support is however waiting on the LLVM 6.0.1 release as it + contains critical patches not present in 6.0.0. It is hoped + that any remaining issues will be fixed in the next release. + + The capacity for advanced users/compiler engineers to define + their own compilation pipelines. + +------------------------------------------------------------------- +Mon Apr 23 14:55:41 UTC 2018 - toddrme2178@gmail.com + +- Fix dependency versions + +------------------------------------------------------------------- +Fri Mar 2 23:16:36 UTC 2018 - arun@gmx.de + +- specfile: + * update required llvmlite version + +- update to version 0.37.0: + * Misc enhancements: + + PR #2627: Remove hacks to make llvmlite threadsafe + + PR #2672: Add ascontiguousarray + + PR #2678: Add Gitter badge + + PR #2691: Fix #2690: add intrinsic to convert array to tuple + + PR #2703: Test runner feature: failed-first and last-failed + + PR #2708: Patch for issue #1907 + + PR #2732: Add support for array.fill + * Misc Fixes: + + PR #2610: Fix #2606 lowering of optional.setattr + + PR #2650: Remove skip for win32 cosine test + + PR #2668: Fix empty_like from readonly arrays. + + PR #2682: Fixes 2210, remove _DisableJitWrapper + + PR #2684: Fix #2340, generator error yielding bool + + PR #2693: Add travis-ci testing of NumPy 1.14, and also check on + Python 2.7 + + PR #2694: Avoid type inference failure due to a typing template + rejection + + PR #2695: Update llvmlite version dependency. + + PR #2696: Fix tuple indexing codegeneration for empty tuple + + PR #2698: Fix #2697 by deferring deletion in the simplify_CFG + loop. + + PR #2701: Small fix to avoid tempfiles being created in the + current directory + + PR #2725: Fix 2481, LLVM IR parsing error due to mutated IR + + PR #2726: Fix #2673: incorrect fork error msg. + + PR #2728: Alternative to #2620. Remove dead code + ByteCodeInst.get. 
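[Illustrative aside for the 0.38.0 highlight above about `numba.extending.get_cython_function_address`. A minimal sketch assuming SciPy is installed and that its Cython module exports a plain `j0` symbol, as in Numba's extension documentation; not part of the release notes.]

    from ctypes import CFUNCTYPE, c_double
    from numba import njit
    from numba.extending import get_cython_function_address

    # Look up the address of a Cython-compiled function and wrap it with ctypes
    addr = get_cython_function_address("scipy.special.cython_special", "j0")
    j0 = CFUNCTYPE(c_double, c_double)(addr)

    @njit
    def bessel_j0(x):
        return j0(x)   # the ctypes wrapper is callable from nopython code

    print(bessel_j0(1.0))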
+ + PR #2730: Add guard for test needing SciPy/BLAS + * Documentation updates: + + PR #2670: Update communication channels + + PR #2671: Add docs about diagnosing loop vectorizer + + PR #2683: Add docs on const arg requirements and on const mem + alloc + + PR #2722: Add docs on numpy support in cuda + + PR #2724: Update doc: warning about unsupported arguments + * ParallelAccelerator enhancements/fixes: + + Parallel support for `np.arange` and `np.linspace`, also + `np.mean`, `np.std` and `np.var` are added. This was performed + as part of a general refactor and cleanup of the core ParallelAccelerator code. + + PR #2674: Core pa + + PR #2704: Generate Dels after parfor sequential lowering + + PR #2716: Handle matching directly supported functions + * CUDA enhancements: + + PR #2665: CUDA DeviceNDArray: Support numpy tranpose API + + PR #2681: Allow Assigning to DeviceNDArrays + + PR #2702: Make DummyArray do High Dimensional Reshapes + + PR #2714: Use CFFI to Reuse Code + * CUDA fixes: + + PR #2667: Fix CUDA DeviceNDArray slicing + + PR #2686: Fix #2663: incorrect offset when indexing cuda array. + + PR #2687: Ensure Constructed Stream Bound + + PR #2706: Workaround for unexpected warp divergence due to + exception raising code + + PR #2707: Fix regression: cuda test submodules not loading + properly in runtests + + PR #2731: Use more challenging values in slice tests. + + PR #2720: A quick testsuite fix to not run the new cuda testcase + in the multiprocess pool + +------------------------------------------------------------------- +Thu Jan 11 19:25:55 UTC 2018 - toddrme2178@gmail.com + +- Bump minimum llvmlite version. + +------------------------------------------------------------------- +Thu Dec 21 18:33:16 UTC 2017 - arun@gmx.de + +- update to version 0.36.2: + * PR #2645: Avoid CPython bug with "exec" in older 2.7.x. + * PR #2652: Add support for CUDA 9. + +------------------------------------------------------------------- +Fri Dec 8 17:59:51 UTC 2017 - arun@gmx.de + +- update to version 0.36.1: + * ParallelAccelerator features: + + PR #2457: Stencil Computations in ParallelAccelerator + + PR #2548: Slice and range fusion, parallelizing bitarray and + slice assignment + + PR #2516: Support general reductions in ParallelAccelerator + * ParallelAccelerator fixes: + + PR #2540: Fix bug #2537 + + PR #2566: Fix issue #2564. + + PR #2599: Fix nested multi-dimensional parfor type inference + issue + + PR #2604: Fixes for stencil tests and cmath sin(). + + PR #2605: Fixes issue #2603. + * PR #2568: Update for LLVM 5 + * PR #2607: Fixes abort when getting address to + "nrt_unresolved_abort" + * PR #2615: Working towards conda build 3 + * Misc fixes/enhancements: + + PR #2534: Add tuple support to np.take. + + PR #2551: Rebranding fix + + PR #2552: relative doc links + + PR #2570: Fix issue #2561, handle missing successor on loop exit + + PR #2588: Fix #2555. Disable libpython.so linking on linux + + PR #2601: Update llvmlite version dependency. + + PR #2608: Fix potential cache file collision + + PR #2612: Fix NRT test failure due to increased overhead when + running in coverage + + PR #2619: Fix dubious pthread_cond_signal not in lock + + PR #2622: Fix `np.nanmedian` for all NaN case. + + PR #2633: Fix markdown in CONTRIBUTING.md + + PR #2635: Make the dependency on compilers for AOT optional. + * CUDA support fixes: + + PR #2523: Fix invalid cuda context in memory transfer calls in + another thread + + PR #2575: Use CPU to initialize xoroshiro states for GPU + RNG. 
Fixes #2573 + + PR #2581: Fix cuda gufunc mishandling of scalar arg as array and + out argument + +------------------------------------------------------------------- +Tue Oct 3 06:05:20 UTC 2017 - arun@gmx.de + +- update to version 0.35.0: + * ParallelAccelerator: + + PR #2400: Array comprehension + + PR #2405: Support printing Numpy arrays + + PR #2438: from Support more np.random functions in + ParallelAccelerator + + PR #2482: Support for sum with axis in nopython mode. + + PR #2487: Adding developer documentation for ParallelAccelerator + technology. + + PR #2492: Core PA refactor adds assertions for broadcast + semantics + * ParallelAccelerator fixes: + + PR #2478: Rename cfg before parfor translation (#2477) + + PR #2479: Fix broken array comprehension tests on unsupported + platforms + + PR #2484: Fix array comprehension test on win64 + + PR #2506: Fix for 32-bit machines. + * Additional features of note: + + PR #2490: Implement np.take and ndarray.take + + PR #2493: Display a warning if parallel=True is set but not + possible. + + PR #2513: Add np.MachAr, np.finfo, np.iinfo + + PR #2515: Allow environ overriding of cpu target and cpu + features. + * Misc fixes/enhancements: + + PR #2455: add contextual information to runtime errors + + PR #2470: Fixes #2458, poor performance in np.median + + PR #2471: Ensure LLVM threadsafety in {g,}ufunc building. + + PR #2494: Update doc theme + + PR #2503: Remove hacky code added in 2482 and feature + enhancement + + PR #2505: Serialise env mutation tests during multithreaded + testing. + + PR #2520: Fix failing cpu-target override tests + * CUDA support fixes: + + PR #2504: Enable CUDA toolkit version testing + + PR #2509: Disable tests generating code unavailable in lower CC + versions. + + PR #2511: Fix Windows 64 bit CUDA tests. + +- changes from version 0.34.0: + * ParallelAccelerator features: + + PR #2318: Transfer ParallelAccelerator technology to Numba + + PR #2379: ParallelAccelerator Core Improvements + + PR #2367: Add support for len(range(...)) + + PR #2369: List comprehension + + PR #2391: Explicit Parallel Loop Support (prange) + * CUDA support enhancements: + + PR #2377: New GPU reduction algorithm + * CUDA support fixes: + + PR #2397: Fix #2393, always set alignment of cuda static memory + regions + * Misc Fixes: + + PR #2373, Issue #2372: 32-bit compatibility fix for parfor + related code + + PR #2376: Fix #2375 missing stdint.h for py2.7 vc9 + + PR #2378: Fix deadlock in parallel gufunc when kernel acquires + the GIL. + + PR #2382: Forbid unsafe casting in bitwise operation + + PR #2385: docs: fix Sphinx errors + + PR #2396: Use 64-bit RHS operand for shift + + PR #2404: Fix threadsafety logic issue in ufunc compilation + cache. + + PR #2424: Ensure consistent iteration order of blocks for type + inference. + + PR #2425: Guard code to prevent the use of ‘parallel’ on win32 + + py27 + + PR #2426: Basic test for Enum member type recovery. + + PR #2433: Fix up the parfors tests with respect to windows py2.7 + + PR #2442: Skip tests that need BLAS/LAPACK if scipy is not + available. 
+ + PR #2444: Add test for invalid array setitem + + PR #2449: Make the runtime initialiser threadsafe + + PR #2452: Skip CFG test on 64bit windows + * Misc Enhancements: + + PR #2366: Improvements to IR utils + + PR #2388: Update README.rst to indicate the proper version of + LLVM + + PR #2394: Upgrade to llvmlite 0.19.* + + PR #2395: Update llvmlite version to 0.19 + + PR #2406: Expose environment object to ufuncs + + PR #2407: Expose environment object to target-context inside + lowerer + + PR #2413: Add flags to pass through to conda build for buildbot + + PR #2414: Add cross compile flags to local recipe + + PR #2415: A few cleanups for rewrites + + PR #2418: Add getitem support for Enum classes + + PR #2419: Add support for returning enums in vectorize + + PR #2421: Add copyright notice for Intel contributed files. + + PR #2422: Patch code base to work with np 1.13 release + + PR #2448: Adds in warning message when using ‘parallel’ if + cache=True + + PR #2450: Add test for keyword arg on .sum-like and .cumsum-like + array methods + +- changes from version 0.33.0: + * There are also several enhancements to the CUDA GPU support: + + A GPU random number generator based on xoroshiro128+ algorithm + is added. See details and examples in documentation. + + @cuda.jit CUDA kernels can now call @jit and @njit CPU functions + and they will automatically be compiled as CUDA device + functions. + + CUDA IPC memory API is exposed for sharing memory between + proceses. See usage details in documentation. + * Reference counting enhancements: + + PR #2346, Issue #2345, #2248: Add extra refcount pruning after + inlining + + PR #2349: Fix refct pruning not removing refct op with tail + call. + + PR #2352, Issue #2350: Add refcount pruning pass for function + that does not need refcount + * CUDA support enhancements: + + PR #2023: Supports CUDA IPC for device array + + PR #2343, Issue #2335: Allow CPU jit decorated function to be + used as cuda device function + + PR #2347: Add random number generator support for CUDA device + code + + PR #2361: Update autotune table for CC: 5.3, 6.0, 6.1, 6.2 + * Misc fixes: + + PR #2362: Avoid test failure due to typing to int32 on 32-bit + platforms + + PR #2359: Fixed nogil example that threw a TypeError when + executed. + + PR #2357, Issue #2356: Fix fragile test that depends on how the + script is executed. + + PR #2355: Fix cpu dispatcher referenced as attribute of another + module + + PR #2354: Fixes an issue with caching when function needs NRT + and refcount pruning + + PR #2342, Issue #2339: Add warnings to inspection when it is + used on unserialized cached code + + PR #2329, Issue #2250: Better handling of missing op codes + * Misc enhancements: + + PR #2360: Adds missing values in error mesasge interp. + + PR #2353: Handle when get_host_cpu_features() raises + RuntimeError + + PR #2351: Enable SVML for erf/erfc/gamma/lgamma/log2 + + PR #2344: Expose error_model setting in jit decorator + + PR #2337: Align blocking terminate support for fork() with new + TBB version + + PR #2336: Bump llvmlite version to 0.18 + + PR #2330: Core changes in PR #2318 + +------------------------------------------------------------------- +Wed May 3 18:23:09 UTC 2017 - toddrme2178@gmail.com + +- update to version 0.32.0: + + Improvements: + * PR #2322: Suppress test error due to unknown but consistent error with tgamma + * PR #2320: Update llvmlite dependency to 0.17 + * PR #2308: Add details to error message on why cuda support is disabled. 
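[Illustrative aside for the 0.33.0 highlight above, where `@cuda.jit` kernels may call `@jit`/`@njit` functions, which are then compiled as CUDA device functions. A minimal sketch that assumes a CUDA-capable GPU (or the NUMBA_ENABLE_CUDASIM=1 simulator); not part of the release notes.]

    import numpy as np
    from numba import cuda, njit

    @njit
    def scale(x):               # ordinary CPU-jitted helper
        return 2 * x

    @cuda.jit
    def kernel(out):
        i = cuda.grid(1)        # absolute thread index
        if i < out.size:
            out[i] = scale(i)   # reused here as a device function

    out = np.zeros(32, dtype=np.int64)
    kernel[1, 32](out)          # one block of 32 threads; the array is copied to and from the device
    print(out[:4])              # [0 2 4 6]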
+ * PR #2302: Add os x to travis + * PR #2294: Disable remove_module on MCJIT due to memory leak inside LLVM + * PR #2291: Split parallel tests and recycle workers to tame memory usage + * PR #2253: Remove the pointer-stuffing hack for storing meminfos in lists + + Fixes: + * PR #2331: Fix a bug in the GPU array indexing + * PR #2326: Fix #2321 docs referring to non-existing function. + * PR #2316: Fixing more race-condition problems + * PR #2315: Fix #2314. Relax strict type check to allow optional type. + * PR #2310: Fix race condition due to concurrent compilation and cache loading + * PR #2304: Fix intrinsic 1st arg not a typing.Context as stated by the docs. + * PR #2287: Fix int64 atomic min-max + * PR #2286: Fix #2285 `@overload_method` not linking dependent libs + * PR #2303: Missing import statements to interval-example.rst +- Implement single-spec version + +------------------------------------------------------------------- +Wed Feb 22 22:15:53 UTC 2017 - arun@gmx.de + +- update to version 0.31.0: + * Improvements: + + PR #2281: Update for numpy1.12 + + PR #2278: Add CUDA atomic.{max, min, compare_and_swap} + + PR #2277: Add about section to conda recipies to identify + license and other metadata in Anaconda Cloud + + PR #2271: Adopt itanium C++-style mangling for CPU and CUDA + targets + + PR #2267: Add fastmath flags + + PR #2261: Support dtype.type + + PR #2249: Changes for llvm3.9 + + PR #2234: Bump llvmlite requirement to 0.16 and add + install_name_tool_fixer to mviewbuf for OS X + + PR #2230: Add python3.6 to TravisCi + + PR #2227: Enable caching for gufunc wrapper + + PR #2170: Add debugging support + + PR #2037: inspect_cfg() for easier visualization of the function + operation + * Fixes: + + PR #2274: Fix nvvm ir patch in mishandling “load” + + PR #2272: Fix breakage to cuda7.5 + + PR #2269: Fix caching of copy_strides kernel in cuda.reduce + + PR #2265: Fix #2263: error when linking two modules with dynamic + globals + + PR #2252: Fix path separator in test + + PR #2246: Fix overuse of memory in some system with fork + + PR #2241: Fix #2240: __module__ in dynamically created function + not a str + + PR #2239: Fix fingerprint computation failure preventing + fallback + +------------------------------------------------------------------- +Sun Jan 15 00:33:08 UTC 2017 - arun@gmx.de + +- update to version 0.30.1: + * Fixes: + + PR #2232: Fix name clashes with _Py_hashtable_xxx in Python 3.6. + * Improvements: + + PR #2217: Add Intel TBB threadpool implementation for parallel + ufunc. + +------------------------------------------------------------------- +Tue Jan 10 17:17:33 UTC 2017 - arun@gmx.de + +- specfile: + * update copyright year + +- update to version 0.30.0: + * Improvements: + + PR #2209: Support Python 3.6. + + PR #2175: Support np.trace(), np.outer() and np.kron(). + + PR #2197: Support np.nanprod(). + + PR #2190: Support caching for ufunc. + + PR #2186: Add system reporting tool. + * Fixes: + + PR #2214, Issue #2212: Fix memory error with ndenumerate and + flat iterators. + + PR #2206, Issue #2163: Fix zip() consuming extra elements in + early exhaustion. + + PR #2185, Issue #2159, #2169: Fix rewrite pass affecting objmode + fallback. + + PR #2204, Issue #2178: Fix annotation for liftedloop. + + PR #2203: Fix Appveyor segfault with Python 3.5. + + PR #2202, Issue #2198: Fix target context not initialized when + loading from ufunc cache. + + PR #2172, Issue #2171: Fix optional type unpacking. 
+ + PR #2189, Issue #2188: Disable freezing of big (>1MB) global + arrays. + + PR #2180, Issue #2179: Fix invalid variable version in + looplifting. + + PR #2156, Issue #2155: Fix divmod, floordiv segfault on CUDA. + +------------------------------------------------------------------- +Fri Dec 2 21:07:51 UTC 2016 - jengelh@inai.de + +- remove subjective words from description + +------------------------------------------------------------------- +Sat Nov 5 17:53:40 UTC 2016 - arun@gmx.de + +- update to version 0.29.0: + * Improvements: + + PR #2130, #2137: Add type-inferred recursion with docs and + examples. + + PR #2134: Add np.linalg.matrix_power. + + PR #2125: Add np.roots. + + PR #2129: Add np.linalg.{eigvals,eigh,eigvalsh}. + + PR #2126: Add array-to-array broadcasting. + + PR #2069: Add hstack and related functions. + + PR #2128: Allow for vectorizing a jitted function. (thanks to + @dhirschfeld) + + PR #2117: Update examples and make them test-able. + + PR #2127: Refactor interpreter class and its results. + * Fixes: + + PR #2149: Workaround MSVC9.0 SP1 fmod bug kb982107. + + PR #2145, Issue #2009: Fixes kwargs for jitclass __init__ + method. + + PR #2150: Fix slowdown in objmode fallback. + + PR #2050, Issue #1258: Fix liveness problem with some generator + loops. + + PR #2072, Issue #1995: Right shift of unsigned LHS should be + logical. + + PR #2115, Issue #1466: Fix inspect_types() error due to mangled + variable name. + + PR #2119, Issue #2118: Fix array type created from record-dtype. + + PR #2122, Issue #1808: Fix returning a generator due to + datamodel error. + +------------------------------------------------------------------- +Fri Sep 23 23:38:02 UTC 2016 - toddrme2178@gmail.com + +- Initial version + diff --git a/python-numba.spec b/python-numba.spec new file mode 100644 index 0000000..36a3f7f --- /dev/null +++ b/python-numba.spec @@ -0,0 +1,191 @@ +# +# spec file for package python-numba +# +# Copyright (c) 2024 SUSE LLC +# +# All modifications and additions to the file contributed by third parties +# remain the property of their copyright owners, unless otherwise agreed +# upon. The license for this file, and modifications and additions to the +# file, is the same license as for the pristine package itself (unless the +# license for the pristine package is not an Open Source License, in which +# case the license is the MIT License). An "Open Source License" is a +# license that conforms to the Open Source Definition (Version 1.9) +# published by the Open Source Initiative. 
+ +# Please submit bugfixes or comments via https://bugs.opensuse.org/ +# + + +%define plainpython python +# upper bound is exclusive: min-numpy_ver <= numpy < max_numpy_ver +%define min_numpy_ver 1.24 +%define max_numpy_ver 2.2 + +%{?sle15_python_module_pythons} + +%global flavor @BUILD_FLAVOR@%{nil} +%if "%{flavor}" == "" +%define psuffix %{nil} +%bcond_with test +# Supported Platforms: https://numba.pydata.org/numba-doc/dev/user/installing.html#compatibility +ExclusiveArch: x86_64 %ix86 ppc64le %arm aarch64 +%else +%bcond_without test +%define psuffix -%{flavor} +%if "%{flavor}" != "test-py39" +%define skip_python39 1 +%endif +%if "%{flavor}" != "test-py310" +%define skip_python310 1 +%endif +%if "%{flavor}" != "test-py311" +%define skip_python311 1 +%endif +%if "%{flavor}" != "test-py312" +%define skip_python312 1 +%endif +%if "%{flavor}" != "test-py313" +%define skip_python313 1 +%endif +# The obs server-side interpreter cannot use lua or rpm shrink +%if "%pythons" == "" || "%pythons" == " " || "%pythons" == " " || "%pythons" == " " || "%pythons" == " " || ( "%pythons" == "python311" && 0%{?skip_python311} ) +ExclusiveArch: donotbuild +%define python_module() %flavor-not-enabled-in-buildset-for-suse-%{?suse_version} +%else +# Tests fail on ppc64 big endian, not resolvable on s390x, wrong types on 32-bit. See also above compatibility list for building +ExcludeArch: s390x ppc64 %ix86 %arm +%endif +%endif +# not supported with 0.60.0 +%global skip_python313 1 +Name: python-numba%{?psuffix} +Version: 0.60.0 +Release: 0 +Summary: NumPy-aware optimizing compiler for Python using LLVM +License: BSD-2-Clause +URL: https://numba.pydata.org/ +# SourceRepository: https://github.com/numba/numba +Source: https://files.pythonhosted.org/packages/source/n/numba/numba-%{version}.tar.gz +# PATCH-FIX-OPENSUSE skip tests failing due to OBS specifics +Patch3: skip-failing-tests.patch +# PATCH-FIX-UPSTREAM https://github.com/numba/numba/pull/9741 Add Support for NumPy 2.1 +Patch4: numpy21.patch +BuildRequires: %{python_module devel >= 3.9} +BuildRequires: %{python_module numpy-devel >= %{min_numpy_ver} with %python-numpy-devel < %{max_numpy_ver}} +BuildRequires: %{python_module pip} +BuildRequires: %{python_module setuptools} +BuildRequires: %{python_module wheel} +BuildRequires: fdupes +BuildRequires: gcc-c++ +BuildRequires: python-rpm-macros +BuildRequires: (tbb-devel >= 2021) +Requires: (python-llvmlite >= 0.43 with python-llvmlite < 0.44) +Requires: (python-numpy >= %{min_numpy_ver} with python-numpy < %{max_numpy_ver}) +Requires(post): update-alternatives +Requires(postun): update-alternatives +Recommends: python-Jinja2 +Recommends: python-Pygments +Recommends: python-cffi +Recommends: python-scipy > 1.0 +Recommends: python-tbb +%if %{with test} +BuildRequires: %{python_module Jinja2} +BuildRequires: %{python_module PyYAML} +BuildRequires: %{python_module Pygments} +BuildRequires: %{python_module cffi} +BuildRequires: %{python_module ipython} +BuildRequires: %{python_module numba = %{version}} +BuildRequires: %{python_module numba-devel = %{version}} +BuildRequires: %{python_module pip} +BuildRequires: %{python_module psutil} +BuildRequires: %{python_module pytest} +BuildRequires: %{python_module scipy >= 1.0} +BuildRequires: %{python_module tbb} +%endif +%python_subpackages + +%description +Numba is a NumPy-aware optimizing compiler for Python. It uses the +LLVM compiler infrastructure to compile Python syntax to +machine code. 
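As a minimal sketch of what the description above means in practice (an illustrative example only, not part of the spec; the function is made up):

    import numpy as np
    from numba import njit

    @njit
    def total(arr):
        s = 0.0
        for x in arr:           # this loop runs as LLVM-compiled machine code
            s += x
        return s

    # Compiled on first call, using the runtime type of the argument
    print(total(np.arange(1_000_000, dtype=np.float64)))

Providing type information in the decorator, as the description goes on to mention, corresponds to an explicit signature such as @njit("float64(float64[:])").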
+ +It is aware of NumPy arrays as typed memory regions and so can speed-up +code using NumPy arrays. Other, less well-typed code will be translated +to Python C-API calls, effectively removing the "interpreter", but not removing +the dynamic indirection. + +Numba is also not a tracing JIT. It *compiles* your code before it gets +run, either using run-time type information or type information you provide +in the decorator. + +Numba is a mechanism for producing machine code from Python syntax and typed +data structures such as those that exist in NumPy. + +%package devel +Summary: Development files for numba applications +Requires: %{name} = %{version} +Requires: python-devel +Requires: python-numpy-devel >= %{min_numpy_ver} +Requires: %{plainpython}(abi) = %{python_version} + +%description devel +This package contains files for developing applications using numba. + +%prep +%autosetup -p1 -n numba-%{version} +sed -i -e '1{/env python/ d}' numba/misc/appdirs.py + +# random timeouts in OBS +rm numba/tests/test_typedlist.py +# if we reduced the amount of tests too much: +# sed -i -e '/check_testsuite_size/ s/5000/3000/' numba/tests/test_runtests.py + +%build +%if !%{with test} +export CFLAGS="%{optflags} -fPIC" +%pyproject_wheel +%endif + +%install +%if !%{with test} +%pyproject_install +%{python_expand # +%fdupes %{buildroot}%{$python_sitearch} +find %{buildroot}%{$python_sitearch} -name '*.[ch]' > devel-files0-%{$python_bin_suffix}.files +sed 's|^%{buildroot}||' devel-files0-%{$python_bin_suffix}.files > devel-files-%{$python_bin_suffix}.files +sed 's|^%{buildroot}|%%exclude |' devel-files0-%{$python_bin_suffix}.files > devel-files-exclude-%{$python_bin_suffix}.files +} +%python_clone -a %{buildroot}%{_bindir}/numba +%endif + +%check +%if %{with test} +# test the installed package, not the source without compiled modules +mkdir emptytestdir +pushd emptytestdir +%{python_expand # numbatests: check specific tests with `osc build -M test --define="numbatests "` +%{_bindir}/numba-%%{$python_bin_suffix} -s +$python -m numba.runtests -v -b --exclude-tags='long_running' -m %{_smp_build_ncpus} -- %{?!numbatests:numba.tests}%{?numbatests} +} +popd +%endif + +%if !%{with test} +%post +%python_install_alternative numba + +%postun +%python_uninstall_alternative numba + +%files %{python_files} -f devel-files-exclude-%{python_bin_suffix}.files +%license LICENSE +%doc CHANGE_LOG README.rst +%python_alternative %{_bindir}/numba +%{python_sitearch}/numba/ +%{python_sitearch}/numba-%{version}.dist-info + +%files %{python_files devel} -f devel-files-%{python_bin_suffix}.files +%license LICENSE +%endif + +%changelog diff --git a/skip-failing-tests.patch b/skip-failing-tests.patch new file mode 100644 index 0000000..a8e5cac --- /dev/null +++ b/skip-failing-tests.patch @@ -0,0 +1,71 @@ +--- + numba/tests/test_parfors.py | 5 +++-- + numba/tests/test_parfors_passes.py | 1 + + 2 files changed, 4 insertions(+), 2 deletions(-) + +Index: numba-0.59.1/numba/tests/test_parfors.py +=================================================================== +--- numba-0.59.1.orig/numba/tests/test_parfors.py ++++ numba-0.59.1/numba/tests/test_parfors.py +@@ -1199,6 +1199,7 @@ class TestParforNumPy(TestParforsBase): + self.check_variants(test_impl2, data_gen) + self.count_parfors_variants(test_impl2, data_gen) + ++ @unittest.skip("Fails on type check in OBS") + def test_ndarray_fill(self): + def test_impl(x): + x.fill(7.0) +@@ -4659,7 +4660,7 @@ class TestParforsVectorizer(TestPrangeBa + + return asm + +- @linux_only ++ 
@unittest.skip("Our x86_64 asm is most probably different from the upstream one.") + @TestCase.run_test_in_subprocess + def test_vectorizer_fastmath_asm(self): + """ This checks that if fastmath is set and the underlying hardware +@@ -4700,7 +4701,7 @@ class TestParforsVectorizer(TestPrangeBa + # check no zmm addressing is present + self.assertTrue('zmm' not in v) + +- @linux_only ++ @unittest.skip("Our x86_64 asm is most probably different from the upstream one.") + @TestCase.run_test_in_subprocess(envvars={'NUMBA_BOUNDSCHECK': '0'}) + def test_unsigned_refusal_to_vectorize(self): + """ This checks that if fastmath is set and the underlying hardware +Index: numba-0.59.1/numba/tests/test_parfors_passes.py +=================================================================== +--- numba-0.59.1.orig/numba/tests/test_parfors_passes.py ++++ numba-0.59.1/numba/tests/test_parfors_passes.py +@@ -514,6 +514,7 @@ class TestConvertLoopPass(BaseTest): + str(raises.exception), + ) + ++ @unittest.skip("Fails on type check in OBS") + def test_init_prange(self): + def test_impl(): + n = 20 +Index: numba-0.59.1/numba/tests/test_cli.py +=================================================================== +--- numba-0.59.1.orig/numba/tests/test_cli.py ++++ numba-0.59.1/numba/tests/test_cli.py +@@ -264,6 +264,7 @@ class TestGDBCLIInfoBrokenGdbs(TestCase) + self.assertIn("No such file or directory", stdout) + self.assertIn(path, stdout) + ++ @unittest.skip("Fails on type check in OBS") + def test_nonsense_gdb_binary(self): + # Tests that a nonsense binary specified as gdb it picked up ok + env = os.environ.copy() +Index: numba-0.59.1/numba/tests/test_mathlib.py +=================================================================== +--- numba-0.59.1.orig/numba/tests/test_mathlib.py ++++ numba-0.59.1/numba/tests/test_mathlib.py +@@ -508,6 +508,7 @@ class TestMathLib(TestCase): + float('-inf'), float('inf'), float('nan')] + self.run_unary(pyfunc, x_types, x_values, prec='exact') + ++ @unittest.skip("Fails on ppc64le https://github.com/numba/numba/issues/8489") + def test_ldexp(self): + pyfunc = ldexp + cfunc = njit(pyfunc) From 2abad7d325180aa379eb4e08fcde9bf5712430547df6b83e8417b265f36bd35c Mon Sep 17 00:00:00 2001 From: Matej Cepl Date: Fri, 22 Nov 2024 11:22:11 +0000 Subject: [PATCH 2/2] - Add upstream py313.patch to support Python 3.13 OBS-URL: https://build.opensuse.org/package/show/devel:languages:python:numeric/python-numba?expand=0&rev=98 --- _multibuild | 1 + py313.patch | 4127 ++++++++++++++++++++++++++++++++++++++++++ python-numba.changes | 5 + python-numba.spec | 4 +- 4 files changed, 4135 insertions(+), 2 deletions(-) create mode 100644 py313.patch diff --git a/_multibuild b/_multibuild index 6d8cafe..cd365a1 100644 --- a/_multibuild +++ b/_multibuild @@ -2,4 +2,5 @@ test-py310 test-py311 test-py312 + test-py313 diff --git a/py313.patch b/py313.patch new file mode 100644 index 0000000..8ea9ce7 --- /dev/null +++ b/py313.patch @@ -0,0 +1,4127 @@ +From 3e89582a41fc2712a0ce86528be98cf3dd768a23 Mon Sep 17 00:00:00 2001 +From: Siu Kwan Lam <1929845+sklam@users.noreply.github.com> +Date: Tue, 16 Jan 2024 14:31:01 -0600 +Subject: [PATCH 01/61] Minimal changes to get compiling + +Added pythoncapi_compat.h from https://github.com/python/pythoncapi-compat +--- + numba/_devicearray.cpp | 2 +- + numba/_dispatcher.cpp | 8 +- + numba/_dynfunc.c | 12 +- + numba/_helperlib.c | 4 +- + numba/_pymodule.h | 4 + + numba/_typeof.cpp | 8 + + numba/core/runtime/_nrt_python.c | 2 +- + numba/experimental/jitclass/_box.c | 2 +- + 
numba/mviewbuf.c | 2 +- + numba/np/ufunc/_internal.c | 4 +- + numba/pythoncapi_compat.h | 1114 ++++++++++++++++++++++++++++ + setup.py | 2 +- + 12 files changed, 1148 insertions(+), 16 deletions(-) + create mode 100644 numba/pythoncapi_compat.h + +Index: numba-0.60.0/numba/_devicearray.cpp +=================================================================== +--- numba-0.60.0.orig/numba/_devicearray.cpp ++++ numba-0.60.0/numba/_devicearray.cpp +@@ -96,7 +96,7 @@ PyTypeObject DeviceArrayType = { + /* WARNING: Do not remove this, only modify it! It is a version guard to + * act as a reminder to update this struct on Python version update! */ + #if (PY_MAJOR_VERSION == 3) +-#if ! ((PY_MINOR_VERSION == 9) || (PY_MINOR_VERSION == 10) || (PY_MINOR_VERSION == 11) || (PY_MINOR_VERSION == 12)) ++#if ! (NB_SUPPORTED_PYTHON_MINOR) + #error "Python minor version is not supported." + #endif + #else +Index: numba-0.60.0/numba/_dispatcher.cpp +=================================================================== +--- numba-0.60.0.orig/numba/_dispatcher.cpp ++++ numba-0.60.0/numba/_dispatcher.cpp +@@ -27,7 +27,7 @@ + * + */ + +-#if (PY_MAJOR_VERSION >= 3) && (PY_MINOR_VERSION == 12) ++#if (PY_MAJOR_VERSION >= 3) && (PY_MINOR_VERSION == 12) || (PY_MAJOR_VERSION >= 3) && (PY_MINOR_VERSION == 13) + + #ifndef Py_BUILD_CORE + #define Py_BUILD_CORE 1 +@@ -39,7 +39,10 @@ + # undef HAVE_STD_ATOMIC + #endif + #undef _PyGC_FINALIZED +-#include "internal/pycore_atomic.h" ++ ++#if (PY_MINOR_VERSION == 12) ++ #include "internal/pycore_atomic.h" ++#endif + #include "internal/pycore_interp.h" + #include "internal/pycore_pyerrors.h" + #include "internal/pycore_instruments.h" +@@ -780,7 +783,7 @@ call_cfunc(Dispatcher *self, PyObject *c + } + } + +-#elif (PY_MAJOR_VERSION >= 3) && (PY_MINOR_VERSION == 12) ++#elif (PY_MAJOR_VERSION >= 3) && ((PY_MINOR_VERSION == 12) || (PY_MINOR_VERSION == 13)) + + // Python 3.12 has a completely new approach to tracing and profiling due to + // the new `sys.monitoring` system. +@@ -1589,7 +1592,7 @@ static PyTypeObject DispatcherType = { + 0, /* tp_version_tag */ + 0, /* tp_finalize */ + 0, /* tp_vectorcall */ +-#if (PY_MAJOR_VERSION == 3) && (PY_MINOR_VERSION == 12) ++#if (PY_MAJOR_VERSION == 3) && (PY_MINOR_VERSION == 12) || (PY_MAJOR_VERSION >= 3) && (PY_MINOR_VERSION == 13) + /* This was introduced first in 3.12 + * https://github.com/python/cpython/issues/91051 + */ +@@ -1599,7 +1602,7 @@ static PyTypeObject DispatcherType = { + /* WARNING: Do not remove this, only modify it! It is a version guard to + * act as a reminder to update this struct on Python version update! */ + #if (PY_MAJOR_VERSION == 3) +-#if ! ((PY_MINOR_VERSION == 9) || (PY_MINOR_VERSION == 10) || (PY_MINOR_VERSION == 11) || (PY_MINOR_VERSION == 12)) ++#if ! (NB_SUPPORTED_PYTHON_MINOR) + #error "Python minor version is not supported." + #endif + #else +Index: numba-0.60.0/numba/_dynfunc.c +=================================================================== +--- numba-0.60.0.orig/numba/_dynfunc.c ++++ numba-0.60.0/numba/_dynfunc.c +@@ -7,6 +7,12 @@ + + #include + ++ ++// if python version is 3.13 ++#if (PY_MAJOR_VERSION == 3) && (PY_MINOR_VERSION == 13) ++ #include "pythoncapi_compat.h" ++ #define _Py_IsFinalizing Py_IsFinalizing ++#endif + /* NOTE: EnvironmentObject and ClosureObject must be kept in sync with + * the definitions in numba/targets/base.py (EnvBody and ClosureBody). + */ +@@ -146,7 +152,7 @@ static PyTypeObject EnvironmentType = { + /* WARNING: Do not remove this, only modify it! 
It is a version guard to + * act as a reminder to update this struct on Python version update! */ + #if (PY_MAJOR_VERSION == 3) +-#if ! ((PY_MINOR_VERSION == 9) || (PY_MINOR_VERSION == 10) || (PY_MINOR_VERSION == 11) || (PY_MINOR_VERSION == 12)) ++#if ! (NB_SUPPORTED_PYTHON_MINOR) + #error "Python minor version is not supported." + #endif + #else +@@ -265,7 +271,7 @@ static PyTypeObject ClosureType = { + /* WARNING: Do not remove this, only modify it! It is a version guard to + * act as a reminder to update this struct on Python version update! */ + #if (PY_MAJOR_VERSION == 3) +-#if ! ((PY_MINOR_VERSION == 9) || (PY_MINOR_VERSION == 10) || (PY_MINOR_VERSION == 11) || (PY_MINOR_VERSION == 12)) ++#if ! (NB_SUPPORTED_PYTHON_MINOR) + #error "Python minor version is not supported." + #endif + #else +@@ -485,7 +491,7 @@ static PyTypeObject GeneratorType = { + /* WARNING: Do not remove this, only modify it! It is a version guard to + * act as a reminder to update this struct on Python version update! */ + #if (PY_MAJOR_VERSION == 3) +-#if ! ((PY_MINOR_VERSION == 9) || (PY_MINOR_VERSION == 10) || (PY_MINOR_VERSION == 11) || (PY_MINOR_VERSION == 12)) ++#if ! (NB_SUPPORTED_PYTHON_MINOR) + #error "Python minor version is not supported." + #endif + #else +Index: numba-0.60.0/numba/_helperlib.c +=================================================================== +--- numba-0.60.0.orig/numba/_helperlib.c ++++ numba-0.60.0/numba/_helperlib.c +@@ -293,7 +293,7 @@ numba_recreate_record(void *pdata, int s + return NULL; + } + +- numpy = PyImport_ImportModuleNoBlock("numpy"); ++ numpy = PyImport_ImportModule("numpy"); + if (!numpy) goto CLEANUP; + + numpy_record = PyObject_GetAttrString(numpy, "record"); +@@ -833,7 +833,7 @@ static void traceback_add(const char *fu + if (!frame) + goto error; + +-#if (PY_MAJOR_VERSION == 3) && (PY_MINOR_VERSION == 12) /* 3.12 */ ++#if (PY_MAJOR_VERSION == 3) && (PY_MINOR_VERSION == 12) || (PY_MAJOR_VERSION == 3) && (PY_MINOR_VERSION == 13) /* 3.12 or 3.13 */ + #elif (PY_MAJOR_VERSION == 3) && (PY_MINOR_VERSION == 11) /* 3.11 */ + + /* unsafe cast to our copy of _frame to access the f_lineno field */ +@@ -851,7 +851,7 @@ static void traceback_add(const char *fu + Py_DECREF(frame); + return; + +-#if (PY_MAJOR_VERSION == 3) && (PY_MINOR_VERSION == 12) /* 3.12 */ ++#if (PY_MAJOR_VERSION == 3) && (PY_MINOR_VERSION == 12) || (PY_MAJOR_VERSION == 3) && (PY_MINOR_VERSION == 13) /* 3.12 or 3.13 */ + error: + _PyErr_ChainExceptions1(exc); + #elif (PY_MAJOR_VERSION == 3) && ((PY_MINOR_VERSION == 9) || (PY_MINOR_VERSION == 10) || (PY_MINOR_VERSION == 11)) /* 3.11 and below */ +Index: numba-0.60.0/numba/_pymodule.h +=================================================================== +--- numba-0.60.0.orig/numba/_pymodule.h ++++ numba-0.60.0/numba/_pymodule.h +@@ -29,4 +29,7 @@ + PyObject_SetAttrString(m, #name, tmp); \ + Py_DECREF(tmp); } while (0) + ++ ++#define NB_SUPPORTED_PYTHON_MINOR ((PY_MINOR_VERSION == 10) || (PY_MINOR_VERSION == 11) || (PY_MINOR_VERSION == 12) || (PY_MINOR_VERSION == 13)) ++ + #endif /* NUMBA_PY_MODULE_H_ */ +Index: numba-0.60.0/numba/_typeof.cpp +=================================================================== +--- numba-0.60.0.orig/numba/_typeof.cpp ++++ numba-0.60.0/numba/_typeof.cpp +@@ -16,6 +16,14 @@ + #include + #endif + ++#if (PY_MAJOR_VERSION >= 3) && (PY_MINOR_VERSION == 13) ++ #ifndef Py_BUILD_CORE ++ #define Py_BUILD_CORE 1 ++ #endif ++ #include "internal/pycore_setobject.h" // _PySet_NextEntry() ++#endif ++ ++ + /* Cached typecodes for basic 
scalar types */ + static int tc_int8; + static int tc_int16; +Index: numba-0.60.0/numba/core/runtime/_nrt_python.c +=================================================================== +--- numba-0.60.0.orig/numba/core/runtime/_nrt_python.c ++++ numba-0.60.0/numba/core/runtime/_nrt_python.c +@@ -229,7 +229,7 @@ static PyTypeObject MemInfoType = { + /* WARNING: Do not remove this, only modify it! It is a version guard to + * act as a reminder to update this struct on Python version update! */ + #if (PY_MAJOR_VERSION == 3) +-#if ! ((PY_MINOR_VERSION == 9) || (PY_MINOR_VERSION == 10) || (PY_MINOR_VERSION == 11) || (PY_MINOR_VERSION == 12)) ++#if ! (NB_SUPPORTED_PYTHON_MINOR) + #error "Python minor version is not supported." + #endif + #else +Index: numba-0.60.0/numba/experimental/jitclass/_box.c +=================================================================== +--- numba-0.60.0.orig/numba/experimental/jitclass/_box.c ++++ numba-0.60.0/numba/experimental/jitclass/_box.c +@@ -110,7 +110,7 @@ static PyTypeObject BoxType = { + /* WARNING: Do not remove this, only modify it! It is a version guard to + * act as a reminder to update this struct on Python version update! */ + #if (PY_MAJOR_VERSION == 3) +-#if ! ((PY_MINOR_VERSION == 9) || (PY_MINOR_VERSION == 10) || (PY_MINOR_VERSION == 11) || (PY_MINOR_VERSION == 12)) ++#if ! (NB_SUPPORTED_PYTHON_MINOR) + #error "Python minor version is not supported." + #endif + #else +Index: numba-0.60.0/numba/mviewbuf.c +=================================================================== +--- numba-0.60.0.orig/numba/mviewbuf.c ++++ numba-0.60.0/numba/mviewbuf.c +@@ -344,7 +344,7 @@ static PyTypeObject MemAllocType = { + /* WARNING: Do not remove this, only modify it! It is a version guard to + * act as a reminder to update this struct on Python version update! */ + #if (PY_MAJOR_VERSION == 3) +-#if ! ((PY_MINOR_VERSION == 9) || (PY_MINOR_VERSION == 10) || (PY_MINOR_VERSION == 11) || (PY_MINOR_VERSION == 12)) ++#if ! (NB_SUPPORTED_PYTHON_MINOR) + #error "Python minor version is not supported." + #endif + #else +Index: numba-0.60.0/numba/np/ufunc/_internal.c +=================================================================== +--- numba-0.60.0.orig/numba/np/ufunc/_internal.c ++++ numba-0.60.0/numba/np/ufunc/_internal.c +@@ -100,7 +100,7 @@ PyTypeObject PyUFuncCleaner_Type = { + /* WARNING: Do not remove this, only modify it! It is a version guard to + * act as a reminder to update this struct on Python version update! */ + #if (PY_MAJOR_VERSION == 3) +-#if ! ((PY_MINOR_VERSION == 9) || (PY_MINOR_VERSION == 10) || (PY_MINOR_VERSION == 11) || (PY_MINOR_VERSION == 12)) ++#if ! (NB_SUPPORTED_PYTHON_MINOR) + #error "Python minor version is not supported." + #endif + #else +@@ -753,7 +753,7 @@ PyTypeObject PyDUFunc_Type = { + /* WARNING: Do not remove this, only modify it! It is a version guard to + * act as a reminder to update this struct on Python version update! */ + #if (PY_MAJOR_VERSION == 3) +-#if ! ((PY_MINOR_VERSION == 9) || (PY_MINOR_VERSION == 10) || (PY_MINOR_VERSION == 11) || (PY_MINOR_VERSION == 12)) ++#if ! (NB_SUPPORTED_PYTHON_MINOR) + #error "Python minor version is not supported." 
+ #endif + #else +Index: numba-0.60.0/numba/core/bytecode.py +=================================================================== +--- numba-0.60.0.orig/numba/core/bytecode.py ++++ numba-0.60.0/numba/core/bytecode.py +@@ -9,7 +9,7 @@ from numba.core import errors, utils, se + from numba.core.utils import PYVERSION + + +-if PYVERSION in ((3, 12), ): ++if PYVERSION in ((3, 12), (3, 13)): + from opcode import _inline_cache_entries + # Instruction/opcode length in bytes + INSTR_LEN = 2 +@@ -104,7 +104,12 @@ class ByteCodeInst(object): + # https://bugs.python.org/issue27129 + # https://github.com/python/cpython/pull/25069 + assert self.is_jump +- if PYVERSION in ((3, 12), ): ++ if PYVERSION in ((3, 13),): ++ if self.opcode in (dis.opmap[k] ++ for k in ["JUMP_BACKWARD", ++ "JUMP_BACKWARD_NO_INTERRUPT"]): ++ return self.next - (self.arg * 2) ++ elif PYVERSION in ((3, 12),): + if self.opcode in (dis.opmap[k] + for k in ["JUMP_BACKWARD"]): + return self.offset - (self.arg - 1) * 2 +@@ -121,7 +126,7 @@ class ByteCodeInst(object): + else: + raise NotImplementedError(PYVERSION) + +- if PYVERSION in ((3, 10), (3, 11), (3, 12)): ++ if PYVERSION in ((3, 10), (3, 11), (3, 12), (3, 13)): + if self.opcode in JREL_OPS: + return self.next + self.arg * 2 + else: +@@ -160,7 +165,7 @@ OPCODE_NOP = dis.opname.index('NOP') + + + # Adapted from Lib/dis.py +-def _unpack_opargs(code): ++def _unpack_opargs_pre_3_13(code): + """ + Returns a 4-int-tuple of + (bytecode offset, opcode, argument, offset of next bytecode). +@@ -176,7 +181,7 @@ def _unpack_opargs(code): + for j in range(ARG_LEN): + arg |= code[i + j] << (8 * j) + i += ARG_LEN +- if PYVERSION in ((3, 12), ): ++ if PYVERSION in ((3, 12),): + # Python 3.12 introduced cache slots. We need to account for + # cache slots when we determine the offset of the next opcode. + # The number of cache slots is specific to each opcode and can +@@ -200,7 +205,7 @@ def _unpack_opargs(code): + else: + arg = None + i += NO_ARG_LEN +- if PYVERSION in ((3, 12), ): ++ if PYVERSION in ((3, 12),): + # Python 3.12 introduced cache slots. We need to account for + # cache slots when we determine the offset of the next opcode. + # The number of cache slots is specific to each opcode and can +@@ -216,6 +221,80 @@ def _unpack_opargs(code): + offset = i # Mark inst offset at first extended + + ++# Adapted from Lib/dis.py ++def _unpack_opargs_pre_3_13(code): ++ """ ++ Returns a 4-int-tuple of ++ (bytecode offset, opcode, argument, offset of next bytecode). ++ """ ++ extended_arg = 0 ++ n = len(code) ++ offset = i = 0 ++ while i < n: ++ op = code[i] ++ i += CODE_LEN ++ if op >= HAVE_ARGUMENT: ++ arg = code[i] | extended_arg ++ for j in range(ARG_LEN): ++ arg |= code[i + j] << (8 * j) ++ i += ARG_LEN ++ if PYVERSION in ((3, 12),): ++ # Python 3.12 introduced cache slots. We need to account for ++ # cache slots when we determine the offset of the next opcode. ++ # The number of cache slots is specific to each opcode and can ++ # be looked up in the _inline_cache_entries dictionary. ++ i += _inline_cache_entries[op] * INSTR_LEN ++ elif PYVERSION in ((3, 10), (3, 11)): ++ pass ++ else: ++ raise NotImplementedError(PYVERSION) ++ if op == EXTENDED_ARG: ++ # This is a deviation from what dis does... ++ # In python 3.11 it seems like EXTENDED_ARGs appear more often ++ # and are also used as jump targets. 
So as to not have to do ++ # "book keeping" for where EXTENDED_ARGs have been "skipped" ++ # they are replaced with NOPs so as to provide a legal jump ++ # target and also ensure that the bytecode offsets are correct. ++ yield (offset, OPCODE_NOP, arg, i) ++ extended_arg = arg << 8 * ARG_LEN ++ offset = i ++ continue ++ else: ++ arg = None ++ i += NO_ARG_LEN ++ if PYVERSION in ((3, 12),): ++ # Python 3.12 introduced cache slots. We need to account for ++ # cache slots when we determine the offset of the next opcode. ++ # The number of cache slots is specific to each opcode and can ++ # be looked up in the _inline_cache_entries dictionary. ++ i += _inline_cache_entries[op] * INSTR_LEN ++ elif PYVERSION in ((3, 10), (3, 11)): ++ pass ++ else: ++ raise NotImplementedError(PYVERSION) ++ ++ extended_arg = 0 ++ yield (offset, op, arg, i) ++ offset = i # Mark inst offset at first extended ++ ++ ++if PYVERSION in ((3, 13),): ++ ++ def _unpack_opargs(code): ++ buf = [] ++ for i, start_offset, op, arg in dis._unpack_opargs(code): ++ buf.append((start_offset, op, arg)) ++ for i, (start_offset, op, arg) in enumerate(buf): ++ if i + 1 < len(buf): ++ next_offset = buf[i + 1][0] ++ else: ++ next_offset = len(code) ++ yield (start_offset, op, arg, next_offset) ++ ++else: ++ _unpack_opargs = _unpack_opargs_pre_3_13 ++ ++ + def _patched_opargs(bc_stream): + """Patch the bytecode stream. + +@@ -298,7 +377,7 @@ class _ByteCode(object): + # Start with first bytecode's lineno + known = code.co_firstlineno + for inst in table.values(): +- if inst.lineno >= 0: ++ if inst.lineno is not None and inst.lineno >= 0: + known = inst.lineno + else: + inst.lineno = known +@@ -363,7 +442,7 @@ class _ByteCode(object): + + + def _fix_LOAD_GLOBAL_arg(arg): +- if PYVERSION in ((3, 11), (3, 12)): ++ if PYVERSION in ((3, 11), (3, 12), (3, 13)): + return arg >> 1 + elif PYVERSION in ((3, 9), (3, 10)): + return arg +@@ -452,8 +531,15 @@ class ByteCodePy312(ByteCodePy311): + entirely along with the dead exceptions that it points to. + A pair of exception that sandwiches these exception will + also be merged into a single exception. +- """ + ++ Update for Python 3.13, the ending of the pattern has a extra ++ POP_TOP: ++ ++ ... ++ END_FOR ++ POP_TOP ++ SWAP(2) ++ """ + def pop_and_merge_exceptions(entries: list, + entry_to_remove: _ExceptionTableEntry): + lower_entry_idx = entries.index(entry_to_remove) - 1 +@@ -505,17 +591,34 @@ class ByteCodePy312(ByteCodePy311): + if not next_inst.opname == "FOR_ITER": + continue + +- # Check end of pattern, two instructions. +- # Check for the corresponding END_FOR, exception table end is +- # non-inclusive, so subtract one. +- index = self.ordered_offsets.index(entry.end) +- curr_inst = self.table[self.ordered_offsets[index - 1]] +- if not curr_inst.opname == "END_FOR": +- continue +- # END_FOR must be followed by SWAP(2) +- next_inst = self.table[self.ordered_offsets[index]] +- if not next_inst.opname == "SWAP" and next_inst.arg == 2: +- continue ++ if PYVERSION == (3, 13): ++ # Check end of pattern, two instructions. ++ # Check for the corresponding END_FOR, exception table end ++ # is non-inclusive, so subtract one. 
++ index = self.ordered_offsets.index(entry.end) ++ curr_inst = self.table[self.ordered_offsets[index - 2]] ++ if not curr_inst.opname == "END_FOR": ++ continue ++ next_inst = self.table[self.ordered_offsets[index - 1]] ++ if not next_inst.opname == "POP_TOP": ++ continue ++ # END_FOR must be followed by SWAP(2) ++ next_inst = self.table[self.ordered_offsets[index]] ++ if not next_inst.opname == "SWAP" and next_inst.arg == 2: ++ continue ++ else: ++ assert PYVERSION < (3, 13) ++ # Check end of pattern, two instructions. ++ # Check for the corresponding END_FOR, exception table end ++ # is non-inclusive, so subtract one. ++ index = self.ordered_offsets.index(entry.end) ++ curr_inst = self.table[self.ordered_offsets[index - 1]] ++ if not curr_inst.opname == "END_FOR": ++ continue ++ # END_FOR must be followed by SWAP(2) ++ next_inst = self.table[self.ordered_offsets[index]] ++ if not next_inst.opname == "SWAP" and next_inst.arg == 2: ++ continue + # If all conditions are met that means this exception entry + # is for a list/dict/set comprehension and can be removed. + # Also if there exist exception entries above and below this +@@ -528,7 +631,7 @@ class ByteCodePy312(ByteCodePy311): + + if PYVERSION == (3, 11): + ByteCode = ByteCodePy311 +-elif PYVERSION == (3, 12): ++elif PYVERSION in ((3, 12), (3, 13),): + ByteCode = ByteCodePy312 + elif PYVERSION < (3, 11): + ByteCode = _ByteCode +Index: numba-0.60.0/numba/core/byteflow.py +=================================================================== +--- numba-0.60.0.orig/numba/core/byteflow.py ++++ numba-0.60.0/numba/core/byteflow.py +@@ -10,7 +10,7 @@ from functools import total_ordering + from numba.core.utils import UniqueDict, PYVERSION, ALL_BINOPS_TO_OPERATORS + from numba.core.controlflow import NEW_BLOCKERS, CFGraph + from numba.core.ir import Loc +-from numba.core.errors import UnsupportedError ++from numba.core.errors import UnsupportedBytecodeError + + + _logger = logging.getLogger(__name__) +@@ -24,7 +24,7 @@ _NO_RAISE_OPS = frozenset({ + 'PRECALL', + }) + +-if PYVERSION in ((3, 12), ): ++if PYVERSION in ((3, 12), (3, 13)): + from enum import Enum + + # Operands for CALL_INTRINSIC_1 +@@ -149,7 +149,7 @@ class Flow(object): + self.block_infos[state.pc_initial] = si = adapt_state_infos(state) + _logger.debug("block_infos %s:\n%s", state, si) + +- if PYVERSION in ((3, 11), (3, 12)): ++ if PYVERSION in ((3, 11), (3, 12), (3, 13)): + def _run_handle_exception(self, runner, state): + if not state.in_with() and ( + state.has_active_try() and +@@ -312,7 +312,7 @@ class Flow(object): + msg = ("The 'with (context manager) as " + "(variable):' construct is not " + "supported.") +- raise UnsupportedError(msg) ++ raise UnsupportedBytecodeError(msg) + + + def _is_null_temp_reg(reg): +@@ -331,7 +331,7 @@ class TraceRunner(object): + return Loc(self.debug_filename, lineno) + + def dispatch(self, state): +- if PYVERSION in ((3, 11), (3, 12)): ++ if PYVERSION in ((3, 11), (3, 12), (3, 13)): + if state._blockstack: + state: State + while state._blockstack: +@@ -354,7 +354,8 @@ class TraceRunner(object): + fn(state, inst) + else: + msg = "Use of unsupported opcode (%s) found" % inst.opname +- raise UnsupportedError(msg, loc=self.get_debug_loc(inst.lineno)) ++ raise UnsupportedBytecodeError(msg, ++ loc=self.get_debug_loc(inst.lineno)) + + def _adjust_except_stack(self, state): + """ +@@ -405,6 +406,15 @@ class TraceRunner(object): + state.push(state.make_temp()) + state.append(inst) + ++ if PYVERSION in ((3, 13),): ++ def op_FORMAT_SIMPLE(self, state, 
inst): ++ assert PYVERSION == (3, 13) ++ value = state.pop() ++ strvar = state.make_temp() ++ res = state.make_temp() ++ state.append(inst, value=value, res=res, strvar=strvar) ++ state.push(res) ++ + def op_FORMAT_VALUE(self, state, inst): + """ + FORMAT_VALUE(flags): flags argument specifies format spec which is +@@ -415,7 +425,8 @@ class TraceRunner(object): + """ + if inst.arg != 0: + msg = "format spec in f-strings not supported yet" +- raise UnsupportedError(msg, loc=self.get_debug_loc(inst.lineno)) ++ raise UnsupportedBytecodeError(msg, ++ loc=self.get_debug_loc(inst.lineno)) + value = state.pop() + strvar = state.make_temp() + res = state.make_temp() +@@ -442,7 +453,27 @@ class TraceRunner(object): + def op_POP_TOP(self, state, inst): + state.pop() + +- if PYVERSION in ((3, 11), (3, 12)): ++ if PYVERSION in ((3, 13),): ++ def op_TO_BOOL(self, state, inst): ++ res = state.make_temp() ++ tos = state.pop() ++ state.append(inst, val=tos, res=res) ++ state.push(res) ++ ++ elif PYVERSION < (3, 13): ++ pass ++ ++ if PYVERSION in ((3, 13),): ++ def op_LOAD_GLOBAL(self, state, inst): ++ # Ordering of the global value and NULL is swapped in Py3.13 ++ res = state.make_temp() ++ idx = inst.arg >> 1 ++ state.append(inst, idx=idx, res=res) ++ state.push(res) ++ # ignoring the NULL ++ if inst.arg & 1: ++ state.push(state.make_null()) ++ elif PYVERSION in ((3, 11), (3, 12)): + def op_LOAD_GLOBAL(self, state, inst): + res = state.make_temp() + idx = inst.arg >> 1 +@@ -471,30 +502,89 @@ class TraceRunner(object): + state.push(res) + + def op_LOAD_CONST(self, state, inst): +- res = state.make_temp("const") ++ # append const index for interpreter to read the const value ++ res = state.make_temp("const") + f".{inst.arg}" + state.push(res) + state.append(inst, res=res) + + def op_LOAD_ATTR(self, state, inst): + item = state.pop() +- if PYVERSION in ((3, 12), ): ++ res = state.make_temp() ++ if PYVERSION in ((3, 13),): ++ state.push(res) # the attr ++ if inst.arg & 1: ++ state.push(state.make_null()) ++ elif PYVERSION in ((3, 12),): + if inst.arg & 1: + state.push(state.make_null()) ++ state.push(res) + elif PYVERSION in ((3, 9), (3, 10), (3, 11)): +- pass ++ state.push(res) + else: + raise NotImplementedError(PYVERSION) +- res = state.make_temp() + state.append(inst, item=item, res=res) +- state.push(res) + + def op_LOAD_FAST(self, state, inst): +- name = state.get_varname(inst) ++ assert PYVERSION <= (3, 13) ++ if PYVERSION in ((3, 13), ): ++ try: ++ name = state.get_varname(inst) ++ except IndexError: # oparg is out of range ++ # Handle this like a LOAD_DEREF ++ # Assume MAKE_CELL and COPY_FREE_VARS has correctly setup the ++ # states. 
++ # According to https://github.com/python/cpython/blob/9ac606080a0074cdf7589d9b7c9413a73e0ddf37/Objects/codeobject.c#L730C9-L759 # noqa E501 ++ # localsplus is locals + cells + freevars ++ bc = state._bytecode ++ num_varnames = len(bc.co_varnames) ++ num_freevars = len(bc.co_freevars) ++ num_cellvars = len(bc.co_cellvars) ++ max_fast_local = num_cellvars + num_freevars ++ assert 0 <= inst.arg - num_varnames < max_fast_local ++ res = state.make_temp() ++ state.append(inst, res=res, as_load_deref=True) ++ state.push(res) ++ return ++ else: ++ name = state.get_varname(inst) + res = state.make_temp(name) + state.append(inst, res=res) + state.push(res) + +- if PYVERSION in ((3, 12), ): ++ if PYVERSION in ((3, 13),): ++ def op_LOAD_FAST_LOAD_FAST(self, state, inst): ++ oparg = inst.arg ++ oparg1 = oparg >> 4 ++ oparg2 = oparg & 15 ++ name1 = state.get_varname_by_arg(oparg1) ++ name2 = state.get_varname_by_arg(oparg2) ++ res1 = state.make_temp(name1) ++ res2 = state.make_temp(name2) ++ state.append(inst, res1=res1, res2=res2) ++ state.push(res1) ++ state.push(res2) ++ ++ def op_STORE_FAST_LOAD_FAST(self, state, inst): ++ oparg = inst.arg ++ # oparg1 = oparg >> 4 # not needed ++ oparg2 = oparg & 15 ++ store_value = state.pop() ++ load_name = state.get_varname_by_arg(oparg2) ++ load_res = state.make_temp(load_name) ++ state.append(inst, store_value=store_value, load_res=load_res) ++ state.push(load_res) ++ ++ def op_STORE_FAST_STORE_FAST(self, state, inst): ++ value1 = state.pop() ++ value2 = state.pop() ++ state.append(inst, value1=value1, value2=value2) ++ ++ elif PYVERSION in ((3, 10), (3, 11), (3, 12)): ++ pass ++ else: ++ raise NotImplementedError(PYVERSION) ++ ++ if PYVERSION in ((3, 12), (3, 13)): + op_LOAD_FAST_CHECK = op_LOAD_FAST + op_LOAD_FAST_AND_CLEAR = op_LOAD_FAST + elif PYVERSION in ((3, 9), (3, 10), (3, 11)): +@@ -753,7 +843,7 @@ class TraceRunner(object): + ) + state.push(res) + +- if PYVERSION in ((3, 12), ): ++ if PYVERSION in ((3, 12), (3, 13)): + def op_BINARY_SLICE(self, state, inst): + end = state.pop() + start = state.pop() +@@ -771,7 +861,7 @@ class TraceRunner(object): + else: + raise NotImplementedError(PYVERSION) + +- if PYVERSION in ((3, 12), ): ++ if PYVERSION in ((3, 12), (3, 13)): + def op_STORE_SLICE(self, state, inst): + end = state.pop() + start = state.pop() +@@ -804,7 +894,7 @@ class TraceRunner(object): + op_POP_JUMP_IF_TRUE = _op_POP_JUMP_IF + op_POP_JUMP_IF_FALSE = _op_POP_JUMP_IF + +- if PYVERSION in ((3, 12), ): ++ if PYVERSION in ((3, 12), (3, 13)): + op_POP_JUMP_IF_NONE = _op_POP_JUMP_IF + op_POP_JUMP_IF_NOT_NONE = _op_POP_JUMP_IF + elif PYVERSION in ((3, 9), (3, 10), (3, 11)): +@@ -853,6 +943,8 @@ class TraceRunner(object): + state.append(inst) + state.fork(pc=inst.get_jump_target()) + ++ op_JUMP_BACKWARD_NO_INTERRUPT = op_JUMP_BACKWARD ++ + def op_JUMP_ABSOLUTE(self, state, inst): + state.append(inst) + state.fork(pc=inst.get_jump_target()) +@@ -868,7 +960,7 @@ class TraceRunner(object): + state.append(inst, retval=state.pop(), castval=state.make_temp()) + state.terminate() + +- if PYVERSION in ((3, 12), ): ++ if PYVERSION in ((3, 12), (3, 13)): + def op_RETURN_CONST(self, state, inst): + res = state.make_temp("const") + state.append(inst, retval=res, castval=state.make_temp()) +@@ -884,14 +976,14 @@ class TraceRunner(object): + state.append(inst, value=val, res=res) + state.push(res) + +- if PYVERSION in ((3, 11), (3, 12)): ++ if PYVERSION in ((3, 11), (3, 12), (3, 13)): + def op_RAISE_VARARGS(self, state, inst): + if inst.arg == 0: + exc = None + # 
No re-raising within a try-except block. + # But we allow bare reraise. + if state.has_active_try(): +- raise UnsupportedError( ++ raise UnsupportedBytecodeError( + "The re-raising of an exception is not yet supported.", + loc=self.get_debug_loc(inst.lineno), + ) +@@ -915,7 +1007,7 @@ class TraceRunner(object): + if inst.arg == 0: + exc = None + if in_exc_block: +- raise UnsupportedError( ++ raise UnsupportedBytecodeError( + "The re-raising of an exception is not yet supported.", + loc=self.get_debug_loc(inst.lineno), + ) +@@ -940,7 +1032,10 @@ class TraceRunner(object): + blk = state.pop_block() + state.reset_stack(blk['entry_stack']) + +- if PYVERSION in ((3, 12), ): ++ if PYVERSION in ((3, 13),): ++ def op_END_FOR(self, state, inst): ++ state.pop() ++ elif PYVERSION in ((3, 12),): + def op_END_FOR(self, state, inst): + state.pop() + state.pop() +@@ -954,7 +1049,8 @@ class TraceRunner(object): + if inst.arg != 0: + msg = ('Unsupported use of a bytecode related to try..finally' + ' or a with-context') +- raise UnsupportedError(msg, loc=self.get_debug_loc(inst.lineno)) ++ raise UnsupportedBytecodeError(msg, ++ loc=self.get_debug_loc(inst.lineno)) + + def op_CALL_FINALLY(self, state, inst): + pass +@@ -1068,7 +1164,7 @@ class TraceRunner(object): + 'FINALLY', state, next=inst.next, end=inst.get_jump_target(), + ) + +- if PYVERSION in ((3, 11), (3, 12)): ++ if PYVERSION in ((3, 11), (3, 12), (3, 13)): + def op_POP_EXCEPT(self, state, inst): + state.pop() + +@@ -1076,7 +1172,7 @@ class TraceRunner(object): + def op_POP_EXCEPT(self, state, inst): + blk = state.pop_block() + if blk['kind'] not in {BlockKind('EXCEPT'), BlockKind('FINALLY')}: +- raise UnsupportedError( ++ raise UnsupportedBytecodeError( + f"POP_EXCEPT got an unexpected block: {blk['kind']}", + loc=self.get_debug_loc(inst.lineno), + ) +@@ -1117,16 +1213,24 @@ class TraceRunner(object): + def op_CALL(self, state, inst): + narg = inst.arg + args = list(reversed([state.pop() for _ in range(narg)])) +- callable_or_firstarg = state.pop() +- null_or_callable = state.pop() +- if _is_null_temp_reg(null_or_callable): +- callable = callable_or_firstarg +- else: +- callable = null_or_callable +- args = [callable_or_firstarg, *args] ++ if PYVERSION == (3, 13): ++ null_or_self = state.pop() ++ # position of the callable is fixed ++ callable = state.pop() ++ if not _is_null_temp_reg(null_or_self): ++ args = [null_or_self, *args] ++ kw_names = None ++ elif PYVERSION < (3, 13): ++ callable_or_firstarg = state.pop() ++ null_or_callable = state.pop() ++ if _is_null_temp_reg(null_or_callable): ++ callable = callable_or_firstarg ++ else: ++ callable = null_or_callable ++ args = [callable_or_firstarg, *args] ++ kw_names = state.pop_kw_names() + res = state.make_temp() + +- kw_names = state.pop_kw_names() + state.append(inst, func=callable, args=args, kw_names=kw_names, res=res) + state.push(res) + +@@ -1152,28 +1256,67 @@ class TraceRunner(object): + state.append(inst, func=func, args=args, names=names, res=res) + state.push(res) + +- def op_CALL_FUNCTION_EX(self, state, inst): +- if inst.arg & 1 and PYVERSION < (3, 10): +- errmsg = "CALL_FUNCTION_EX with **kwargs not supported" +- raise UnsupportedError(errmsg) +- if inst.arg & 1: +- varkwarg = state.pop() +- else: +- varkwarg = None +- vararg = state.pop() +- func = state.pop() ++ if PYVERSION in ((3, 13),): ++ def op_CALL_KW(self, state, inst): ++ narg = inst.arg ++ kw_names = state.pop() ++ args = list(reversed([state.pop() for _ in range(narg)])) ++ null_or_firstarg = state.pop() ++ callable = 
state.pop() ++ if not _is_null_temp_reg(null_or_firstarg): ++ args = [null_or_firstarg, *args] + +- if PYVERSION in ((3, 11), (3, 12)): +- if _is_null_temp_reg(state.peek(1)): +- state.pop() # pop NULL, it's not used +- elif PYVERSION in ((3, 9), (3, 10)): +- pass +- else: +- raise NotImplementedError(PYVERSION) ++ res = state.make_temp() ++ state.append(inst, func=callable, args=args, kw_names=kw_names, ++ res=res) ++ state.push(res) + +- res = state.make_temp() +- state.append(inst, func=func, vararg=vararg, varkwarg=varkwarg, res=res) +- state.push(res) ++ elif PYVERSION in ((3, 10), (3, 11), (3, 12)): ++ pass ++ else: ++ raise NotImplementedError(PYVERSION) ++ ++ if PYVERSION in ((3, 13),): ++ def op_CALL_FUNCTION_EX(self, state, inst): ++ # (func, unused, callargs, kwargs if (oparg & 1) -- result)) ++ if inst.arg & 1: ++ varkwarg = state.pop() ++ else: ++ varkwarg = None ++ ++ vararg = state.pop() ++ state.pop() # unused ++ func = state.pop() ++ ++ res = state.make_temp() ++ state.append(inst, func=func, vararg=vararg, varkwarg=varkwarg, ++ res=res) ++ state.push(res) ++ ++ elif PYVERSION in ((3, 10), (3, 11), (3, 12)): ++ ++ def op_CALL_FUNCTION_EX(self, state, inst): ++ if inst.arg & 1: ++ varkwarg = state.pop() ++ else: ++ varkwarg = None ++ vararg = state.pop() ++ func = state.pop() ++ ++ if PYVERSION in ((3, 11), (3, 12)): ++ if _is_null_temp_reg(state.peek(1)): ++ state.pop() # pop NULL, it's not used ++ elif PYVERSION in ((3, 9), (3, 10)): ++ pass ++ else: ++ raise NotImplementedError(PYVERSION) ++ ++ res = state.make_temp() ++ state.append(inst, func=func, vararg=vararg, varkwarg=varkwarg, ++ res=res) ++ state.push(res) ++ else: ++ raise NotImplementedError(PYVERSION) + + def _dup_topx(self, state, inst, count): + orig = [state.pop() for _ in range(count)] +@@ -1187,7 +1330,7 @@ class TraceRunner(object): + for val in duped: + state.push(val) + +- if PYVERSION in ((3, 12), ): ++ if PYVERSION in ((3, 12), (3, 13)): + def op_CALL_INTRINSIC_1(self, state, inst): + # See https://github.com/python/cpython/blob/v3.12.0rc2/Include/ + # internal/pycore_intrinsics.h#L3-L17C36 +@@ -1404,7 +1547,7 @@ class TraceRunner(object): + pred=pred) + state.push(indval) + end = inst.get_jump_target() +- if PYVERSION in ((3, 12), ): ++ if PYVERSION in ((3, 12), (3, 13)): + # Changed in version 3.12: Up until 3.11 the iterator was + # popped when it was exhausted. Now this is handled using END_FOR + # op code. +@@ -1490,7 +1633,7 @@ class TraceRunner(object): + op_BINARY_XOR = _binaryop + + def op_MAKE_FUNCTION(self, state, inst, MAKE_CLOSURE=False): +- if PYVERSION in ((3, 11), (3, 12)): ++ if PYVERSION in ((3, 11), (3, 12), (3, 13)): + # https://github.com/python/cpython/commit/2f180ce + # name set via co_qualname + name = None +@@ -1500,14 +1643,19 @@ class TraceRunner(object): + raise NotImplementedError(PYVERSION) + code = state.pop() + closure = annotations = kwdefaults = defaults = None +- if inst.arg & 0x8: +- closure = state.pop() +- if inst.arg & 0x4: +- annotations = state.pop() +- if inst.arg & 0x2: +- kwdefaults = state.pop() +- if inst.arg & 0x1: +- defaults = state.pop() ++ if PYVERSION in ((3, 13), ): ++ assert inst.arg is None ++ # SET_FUNCTION_ATTRIBUTE is responsible for setting ++ # closure, annotations, kwdefaults and defaults. 
++ else:
++ if inst.arg & 0x8:
++ closure = state.pop()
++ if inst.arg & 0x4:
++ annotations = state.pop()
++ if inst.arg & 0x2:
++ kwdefaults = state.pop()
++ if inst.arg & 0x1:
++ defaults = state.pop()
+ res = state.make_temp()
+ state.append(
+ inst,
+@@ -1521,6 +1669,27 @@ class TraceRunner(object):
+ )
+ state.push(res)
+
++ def op_SET_FUNCTION_ATTRIBUTE(self, state, inst):
++ assert PYVERSION in ((3, 13), )
++ make_func_stack = state.pop()
++ data = state.pop()
++ if inst.arg == 0x1:
++ # 0x01 a tuple of default values for positional-only and
++ # positional-or-keyword parameters in positional order
++ state.set_function_attribute(make_func_stack, defaults=data)
++ elif inst.arg & 0x2:
++ # 0x02 a dictionary of keyword-only parameters’ default values
++ state.set_function_attribute(make_func_stack, kwdefaults=data)
++ elif inst.arg & 0x4:
++ # 0x04 a tuple of strings containing parameters’ annotations
++ state.set_function_attribute(make_func_stack, annotations=data)
++ elif inst.arg == 0x8:
++ # 0x08 a tuple containing cells for free variables, making a closure
++ state.set_function_attribute(make_func_stack, closure=data)
++ else:
++ raise AssertionError("unreachable")
++ state.push(make_func_stack)
++
+ def op_MAKE_CLOSURE(self, state, inst):
+ self.op_MAKE_FUNCTION(state, inst, MAKE_CLOSURE=True)
+
+@@ -1551,7 +1720,7 @@ class TraceRunner(object):
+ state.fork(pc=inst.next)
+ state.fork(pc=inst.get_jump_target())
+
+- if PYVERSION in ((3, 11), (3, 12)):
++ if PYVERSION in ((3, 11), (3, 12), (3, 13)):
+ def op_RERAISE(self, state, inst):
+ # This isn't handled, but the state is set up anyway
+ exc = state.pop()
+@@ -1576,7 +1745,7 @@ class TraceRunner(object):
+ # NOTE: Please see notes in `interpreter.py` surrounding the implementation
+ # of LOAD_METHOD and CALL_METHOD.
+ +- if PYVERSION in ((3, 12), ): ++ if PYVERSION in ((3, 12), (3, 13)): + # LOAD_METHOD has become a pseudo-instruction in 3.12 + pass + elif PYVERSION in ((3, 11), ): +@@ -1828,9 +1997,14 @@ class _State(object): + return self.get_top_block('TRY') is not None + + def get_varname(self, inst): ++ """Get referenced variable name from the instruction's oparg ++ """ ++ return self.get_varname_by_arg(inst.arg) ++ ++ def get_varname_by_arg(self, oparg: int): + """Get referenced variable name from the oparg + """ +- return self._bytecode.co_varnames[inst.arg] ++ return self._bytecode.co_varnames[oparg] + + def terminate(self): + """Mark block as terminated +@@ -1852,7 +2026,7 @@ class _State(object): + stack.append(self.make_temp()) + # Handle changes on the blockstack + blockstack = list(self._blockstack) +- if PYVERSION in ((3, 11), (3, 12)): ++ if PYVERSION in ((3, 11), (3, 12), (3, 13)): + # pop expired block in destination pc + while blockstack: + top = blockstack[-1] +@@ -1940,7 +2114,21 @@ class StatePy311(_State): + return self.make_temp(prefix="null$") + + +-if PYVERSION >= (3, 11): ++class StatePy313(StatePy311): ++ def __init__(self, *args, **kwargs): ++ super().__init__(*args, **kwargs) ++ self._make_func_attrs = defaultdict(dict) ++ ++ def set_function_attribute(self, make_func_res, **kwargs): ++ self._make_func_attrs[make_func_res].update(kwargs) ++ ++ def get_function_attributes(self, make_func_res): ++ return self._make_func_attrs[make_func_res] ++ ++ ++if PYVERSION in ((3, 13), ): ++ State = StatePy313 ++elif PYVERSION in ((3, 11), (3, 12)): + State = StatePy311 + elif PYVERSION < (3, 11): + State = _State +@@ -1970,8 +2158,20 @@ AdaptBlockInfo = namedtuple( + + + def adapt_state_infos(state): ++ def process_function_attributes(inst_pair): ++ offset, data = inst_pair ++ inst = state._bytecode[offset] ++ if inst.opname == "MAKE_FUNCTION": ++ data.update(state.get_function_attributes(data['res'])) ++ return offset, data ++ if PYVERSION in ((3, 13), ): ++ insts = tuple(map(process_function_attributes, state.instructions)) ++ elif PYVERSION in ((3, 10), (3, 11), (3, 12)): ++ insts = tuple(state.instructions) ++ else: ++ raise NotImplementedError(PYVERSION) + return AdaptBlockInfo( +- insts=tuple(state.instructions), ++ insts=insts, + outgoing_phis=state.outgoing_phis, + blockstack=state.blockstack_initial, + active_try_block=state.find_initial_try_block(), +Index: numba-0.60.0/numba/core/controlflow.py +=================================================================== +--- numba-0.60.0.orig/numba/core/controlflow.py ++++ numba-0.60.0/numba/core/controlflow.py +@@ -954,7 +954,7 @@ class ControlFlowAnalysis(object): + self._curblock.terminating = True + self._force_new_block = True + +- if PYVERSION in ((3, 12), ): ++ if PYVERSION in ((3, 12), (3, 13)): + def op_RETURN_CONST(self, inst): + self._curblock.terminating = True + self._force_new_block = True +Index: numba-0.60.0/numba/core/interpreter.py +=================================================================== +--- numba-0.60.0.orig/numba/core/interpreter.py ++++ numba-0.60.0/numba/core/interpreter.py +@@ -6,7 +6,11 @@ import logging + import textwrap + + from numba.core import errors, ir, config +-from numba.core.errors import NotDefinedError, UnsupportedError, error_extras ++from numba.core.errors import ( ++ NotDefinedError, ++ UnsupportedBytecodeError, ++ error_extras, ++) + from numba.core.ir_utils import get_definition, guard + from numba.core.utils import (PYVERSION, BINOPS_TO_OPERATORS, + INPLACE_BINOPS_TO_OPERATORS,) 
+@@ -15,7 +19,7 @@ from numba.core.unsafe import eh + from numba.cpython.unsafe.tuple import unpack_single_tuple + + +-if PYVERSION in ((3, 12), ): ++if PYVERSION in ((3, 12), (3, 13)): + # Operands for CALL_INTRINSIC_1 + from numba.core.byteflow import CALL_INTRINSIC_1_Operand as ci1op + elif PYVERSION in ((3, 9), (3, 10), (3, 11)): +@@ -108,7 +112,7 @@ def _remove_assignment_definition(old_bo + func_ir._definitions[lhs].remove(rhs) + already_deleted_defs[lhs].add(rhs) + elif rhs not in already_deleted_defs[lhs]: +- raise UnsupportedError( ++ raise UnsupportedBytecodeError( + "Inconsistency found in the definitions while executing" + " a peephole optimization. This suggests an internal" + " error or inconsistency elsewhere in the compiler." +@@ -211,7 +215,7 @@ def _call_function_ex_replace_kws_large( + ): + # We cannot handle this format so raise the + # original error message. +- raise UnsupportedError(errmsg) ++ raise UnsupportedBytecodeError(errmsg) + key_var_name = const_stmt.target.name + key_val = const_stmt.value.value + search_start += 1 +@@ -257,7 +261,7 @@ def _call_function_ex_replace_kws_large( + ): + # We cannot handle this format so raise the + # original error message. +- raise UnsupportedError(errmsg) ++ raise UnsupportedBytecodeError(errmsg) + setitem_stmt = old_body[search_start + 1] + if not ( + isinstance(setitem_stmt, ir.Assign) +@@ -277,7 +281,7 @@ def _call_function_ex_replace_kws_large( + # getattr. If for some reason this doesn't match the code + # format, we raise the original error message. This check + # is meant as a precaution. +- raise UnsupportedError(errmsg) ++ raise UnsupportedBytecodeError(errmsg) + arg_var = setitem_stmt.value.args[1] + # Append the (key, value) pair. + kws.append((key_val, arg_var)) +@@ -421,7 +425,7 @@ def _call_function_ex_replace_args_large + and concat_stmt.value.fn == operator.add + ): + # We cannot handle this format. +- raise UnsupportedError(errmsg) ++ raise UnsupportedBytecodeError(errmsg) + lhs_name = concat_stmt.value.lhs.name + rhs_name = concat_stmt.value.rhs.name + # The previous statement should be a +@@ -439,7 +443,7 @@ def _call_function_ex_replace_args_large + and len(arg_tuple_stmt.value.items) == 1 + ): + # We cannot handle this format. +- raise UnsupportedError(errmsg) ++ raise UnsupportedBytecodeError(errmsg) + if arg_tuple_stmt.target.name == lhs_name: + # The tuple should always be generated on the RHS. + raise AssertionError("unreachable") +@@ -447,7 +451,7 @@ def _call_function_ex_replace_args_large + target_name = lhs_name + else: + # We cannot handle this format. +- raise UnsupportedError(errmsg) ++ raise UnsupportedBytecodeError(errmsg) + total_args.append( + arg_tuple_stmt.value.items[0] + ) +@@ -497,7 +501,7 @@ def _call_function_ex_replace_args_large + # If we reached the start we never found the build_tuple. + # We cannot handle this format so raise the + # original error message. +- raise UnsupportedError(errmsg) ++ raise UnsupportedBytecodeError(errmsg) + # Reverse the arguments so we get the correct order. + return total_args[::-1] + +@@ -586,7 +590,7 @@ def peep_hole_call_function_ex_to_call_f + # If we couldn't find where the kwargs are created + # then it should be a normal **kwargs call + # so we produce an unsupported message. +- raise UnsupportedError(errmsg) ++ raise UnsupportedBytecodeError(errmsg) + # Determine the kws + if keyword_def.value.items: + # n_kws <= 15 case. 
+@@ -638,7 +642,7 @@ def peep_hole_call_function_ex_to_call_f + if args: + # If we have vararg then args is expected to + # be an empty list. +- raise UnsupportedError(errmsg) ++ raise UnsupportedBytecodeError(errmsg) + vararg_loc = start_search + args_def = None + found = False +@@ -654,7 +658,7 @@ def peep_hole_call_function_ex_to_call_f + if not found: + # If we couldn't find where the args are created + # then we can't handle this format. +- raise UnsupportedError(errmsg) ++ raise UnsupportedBytecodeError(errmsg) + if ( + isinstance(args_def.value, ir.Expr) + and args_def.value.op == "build_tuple" +@@ -683,7 +687,7 @@ def peep_hole_call_function_ex_to_call_f + # If there is a call with vararg we need to check + # if the list -> tuple conversion failed and if so + # throw an error. +- raise UnsupportedError(errmsg) ++ raise UnsupportedBytecodeError(errmsg) + else: + # Here the IR is an initial empty build_tuple. + # Then for each arg, a new tuple with a single +@@ -747,7 +751,7 @@ def peep_hole_call_function_ex_to_call_f + # exception. + expr = func_ir._definitions[vararg_name][0] + if isinstance(expr, ir.Expr) and expr.op == "list_to_tuple": +- raise UnsupportedError(errmsg) ++ raise UnsupportedBytecodeError(errmsg) + + new_body.append(stmt) + # Replace the block body if we changed the IR +@@ -1197,7 +1201,7 @@ def peep_hole_fuse_dict_add_updates(func + else: + # If we cannot remove _update_from_bytecode + # Then raise an error for the user. +- raise UnsupportedError(errmsg) ++ raise UnsupportedBytecodeError(errmsg) + + # Check if we need to drop any maps from being tracked. + # Skip the setitem/_update_from_bytecode getattr that +@@ -1385,7 +1389,7 @@ class Interpreter(object): + max(inst_blocks.body)) + self.last_active_offset = last_active_offset + +- if PYVERSION in ((3, 12), ): ++ if PYVERSION in ((3, 12), (3, 13)): + self.active_exception_entries = tuple( + [entry for entry in self.bytecode.exception_entries + if entry.start < self.last_active_offset]) +@@ -1401,7 +1405,7 @@ class Interpreter(object): + # Interpret loop + for inst, kws in self._iter_inst(): + self._dispatch(inst, kws) +- if PYVERSION in ((3, 11), (3, 12)): ++ if PYVERSION in ((3, 11), (3, 12), (3, 13)): + # Insert end of try markers + self._end_try_blocks() + elif PYVERSION in ((3, 9), (3, 10)): +@@ -1418,12 +1422,12 @@ class Interpreter(object): + # post process the IR to rewrite opcodes/byte sequences that are too + # involved to risk handling as part of direct interpretation + peepholes = [] +- if PYVERSION in ((3, 11), (3, 12)): ++ if PYVERSION in ((3, 11), (3, 12), (3, 13)): + peepholes.append(peep_hole_split_at_pop_block) +- if PYVERSION in ((3, 9), (3, 10), (3, 11), (3, 12)): ++ if PYVERSION in ((3, 9), (3, 10), (3, 11), (3, 12), (3, 13)): + peepholes.append(peep_hole_list_to_tuple) + peepholes.append(peep_hole_delete_with_exit) +- if PYVERSION in ((3, 10), (3, 11), (3, 12)): ++ if PYVERSION in ((3, 10), (3, 11), (3, 12), (3, 13)): + # peep_hole_call_function_ex_to_call_function_kw + # depends on peep_hole_list_to_tuple converting + # any large number of arguments from a list to a +@@ -1456,7 +1460,7 @@ class Interpreter(object): + + See also: _insert_try_block_end + """ +- assert PYVERSION in ((3, 11), (3, 12)) ++ assert PYVERSION in ((3, 11), (3, 12), (3, 13)) + graph = self.cfa.graph + for offset, block in self.blocks.items(): + # Get current blockstack +@@ -1507,7 +1511,7 @@ class Interpreter(object): + first = uservar[0] + loc = self.current_scope.get(first).loc + msg = "Exception object cannot be 
stored into variable ({})." +- raise errors.UnsupportedError(msg.format(first), loc=loc) ++ raise errors.UnsupportedBytecodeError(msg.format(first), loc=loc) + + def init_first_block(self): + # Define variables receiving the function arguments +@@ -1564,7 +1568,7 @@ class Interpreter(object): + self.dfainfo = self.dfa.infos[self.current_block_offset] + self.assigner = Assigner() + # Check out-of-scope syntactic-block +- if PYVERSION in ((3, 11), (3, 12)): ++ if PYVERSION in ((3, 11), (3, 12), (3, 13)): + # This is recreating pre-3.11 code structure + while self.syntax_blocks: + if offset >= self.syntax_blocks[-1].exit: +@@ -1735,7 +1739,7 @@ class Interpreter(object): + val = self.get(varname) + except ir.NotDefinedError: + # Hack to make sure exception variables are defined +- assert PYVERSION in ((3, 11), (3, 12)), \ ++ assert PYVERSION in ((3, 11), (3, 12), (3, 13)), \ + "unexpected missing definition" + val = ir.Const(value=None, loc=self.loc) + stmt = ir.Assign(value=val, target=target, +@@ -1795,7 +1799,7 @@ class Interpreter(object): + if self._DEBUG_PRINT: + print(inst) + assert self.current_block is not None +- if PYVERSION in ((3, 11), (3, 12)): ++ if PYVERSION in ((3, 11), (3, 12), (3, 13)): + if self.syntax_blocks: + top = self.syntax_blocks[-1] + if isinstance(top, ir.With) : +@@ -1825,6 +1829,9 @@ class Interpreter(object): + if not config.FULL_TRACEBACKS: + raise err from None + else: ++ m = f"handling op: {inst} | offset: {inst.offset}" ++ err.add_context(m) ++ err.add_context(self.bytecode.dump()) + raise err + + # --- Scope operations --- +@@ -1921,6 +1928,10 @@ class Interpreter(object): + loc=self.loc) + self.store(expr, st) + ++ def op_FORMAT_SIMPLE(self, inst, value, res, strvar): ++ # Same as FORMAT_VALUE ++ return self.op_FORMAT_VALUE(inst, value, res, strvar) ++ + def op_FORMAT_VALUE(self, inst, value, res, strvar): + """ + FORMAT_VALUE(flags): flags argument specifies format spec which is not +@@ -1971,7 +1982,7 @@ class Interpreter(object): + (), loc=self.loc) + self.store(value=sliceinst, name=res) + +- if PYVERSION in ((3, 12), ): ++ if PYVERSION in ((3, 12), (3, 13)): + def op_BINARY_SLICE(self, inst, start, end, container, res, slicevar, + temp_res): + start = self.get(start) +@@ -1990,7 +2001,7 @@ class Interpreter(object): + else: + raise NotImplementedError(PYVERSION) + +- if PYVERSION in ((3, 12), ): ++ if PYVERSION in ((3, 12), (3, 13)): + def op_STORE_SLICE(self, inst, start, end, container, value, res, + slicevar): + start = self.get(start) +@@ -2218,11 +2229,58 @@ class Interpreter(object): + stmt = ir.DelItem(base, self.get(indexvar), loc=self.loc) + self.current_block.append(stmt) + +- def op_LOAD_FAST(self, inst, res): ++ def _op_LOAD_FAST(self, inst, res): + srcname = self.code_locals[inst.arg] + self.store(value=self.get(srcname), name=res) + +- if PYVERSION in ((3, 12), ): ++ if PYVERSION in ((3, 13), ): ++ def op_LOAD_FAST(self, inst, res, as_load_deref=False): ++ if as_load_deref: ++ self.op_LOAD_DEREF(inst, res) ++ else: ++ self._op_LOAD_FAST(inst, res) ++ ++ else: ++ op_LOAD_FAST = _op_LOAD_FAST ++ ++ if PYVERSION in ((3, 13),): ++ def op_LOAD_FAST_LOAD_FAST(self, inst, res1, res2): ++ oparg = inst.arg ++ oparg1 = oparg >> 4 ++ oparg2 = oparg & 15 ++ src1 = self.get(self.code_locals[oparg1]) ++ src2 = self.get(self.code_locals[oparg2]) ++ self.store(value=src1, name=res1) ++ self.store(value=src2, name=res2) ++ ++ def op_STORE_FAST_LOAD_FAST(self, inst, store_value, load_res): ++ oparg = inst.arg ++ oparg1 = oparg >> 4 ++ oparg2 = oparg & 15 
++ ++ dstname = self.code_locals[oparg1] ++ dst_value = self.get(store_value) ++ self.store(value=dst_value, name=dstname) ++ ++ src_value = self.get(self.code_locals[oparg2]) ++ self.store(value=src_value, name=load_res) ++ ++ def op_STORE_FAST_STORE_FAST(self, inst, value1, value2): ++ oparg = inst.arg ++ oparg1 = oparg >> 4 ++ oparg2 = oparg & 15 ++ ++ dstname = self.code_locals[oparg1] ++ self.store(value=self.get(value1), name=dstname) ++ dstname = self.code_locals[oparg2] ++ self.store(value=self.get(value2), name=dstname) ++ ++ elif PYVERSION in ((3, 10), (3, 11), (3, 12)): ++ pass ++ else: ++ raise NotImplementedError(PYVERSION) ++ ++ if PYVERSION in ((3, 12), (3, 13)): + op_LOAD_FAST_CHECK = op_LOAD_FAST + + def op_LOAD_FAST_AND_CLEAR(self, inst, res): +@@ -2269,7 +2327,7 @@ class Interpreter(object): + + def op_LOAD_ATTR(self, inst, item, res): + item = self.get(item) +- if PYVERSION in ((3, 12), ): ++ if PYVERSION in ((3, 12), (3, 13)): + attr = self.code_names[inst.arg >> 1] + elif PYVERSION in ((3, 9), (3, 10), (3, 11)): + attr = self.code_names[inst.arg] +@@ -2300,7 +2358,7 @@ class Interpreter(object): + const = ir.Const(value, loc=self.loc) + self.store(const, res) + +- if PYVERSION in ((3, 11), (3, 12)): ++ if PYVERSION in ((3, 11), (3, 12), (3, 13)): + def op_LOAD_GLOBAL(self, inst, idx, res): + name = self.code_names[idx] + value = self.get_global_value(name) +@@ -2318,11 +2376,15 @@ class Interpreter(object): + def op_COPY_FREE_VARS(self, inst): + pass + +- if PYVERSION in ((3, 11), (3, 12)): ++ if PYVERSION in ((3, 11), (3, 12), (3, 13)): + def op_LOAD_DEREF(self, inst, res): + name = self.func_id.func.__code__._varname_from_oparg(inst.arg) + if name in self.code_cellvars: +- gl = self.get(name) ++ try: ++ gl = self.get(name) ++ except NotDefinedError: ++ msg = "Unsupported use of cell variable encountered" ++ raise NotImplementedError(msg) + elif name in self.code_freevars: + idx = self.code_freevars.index(name) + value = self.get_closure_value(idx) +@@ -2343,11 +2405,11 @@ class Interpreter(object): + else: + raise NotImplementedError(PYVERSION) + +- if PYVERSION in ((3, 11), (3, 12)): ++ if PYVERSION in ((3, 11), (3, 12), (3, 13)): + def op_MAKE_CELL(self, inst): + pass # ignored bytecode + +- if PYVERSION in ((3, 11), (3, 12)): ++ if PYVERSION in ((3, 11), (3, 12), (3, 13)): + def op_STORE_DEREF(self, inst, value): + name = self.func_id.func.__code__._varname_from_oparg(inst.arg) + value = self.get(value) +@@ -2387,7 +2449,7 @@ class Interpreter(object): + + def op_BEFORE_WITH(self, inst, contextmanager, exitfn, end): + assert self.blocks[inst.offset] is self.current_block +- if PYVERSION in ((3, 12), ): ++ if PYVERSION in ((3, 12), (3, 13)): + # Python 3.12 hack for handling nested with blocks + if end > self.last_active_offset: + # Use exception entries to figure out end of syntax block +@@ -2437,6 +2499,7 @@ class Interpreter(object): + func = self.get(func) + args = [self.get(x) for x in args] + if kw_names is not None: ++ assert PYVERSION < (3, 13) + names = self.code_consts[kw_names] + kwargs = list(zip(names, args[-len(names):])) + args = args[:-len(names)] +@@ -2445,6 +2508,19 @@ class Interpreter(object): + expr = ir.Expr.call(func, args, kwargs, loc=self.loc) + self.store(expr, res) + ++ if PYVERSION in ((3, 13),): ++ def op_CALL_KW(self, inst, func, args, kw_names, res): ++ func = self.get(func) ++ args = [self.get(x) for x in args] ++ consti = int(kw_names.rsplit('.', 2)[-1]) ++ names = self.code_consts[consti] ++ kwargs = list(zip(names, 
args[-len(names):])) ++ args = args[:-len(names)] ++ expr = ir.Expr.call(func, args, kwargs, loc=self.loc) ++ self.store(expr, res) ++ else: ++ assert PYVERSION < (3, 13) ++ + def op_CALL_FUNCTION(self, inst, func, args, res): + func = self.get(func) + args = [self.get(x) for x in args] +@@ -2878,6 +2954,8 @@ class Interpreter(object): + jmp = ir.Jump(inst.get_jump_target(), loc=self.loc) + self.current_block.append(jmp) + ++ op_JUMP_BACKWARD_NO_INTERRUPT = op_JUMP_BACKWARD ++ + def op_POP_BLOCK(self, inst, kind=None): + if kind is None: + self.syntax_blocks.pop() +@@ -2892,7 +2970,7 @@ class Interpreter(object): + ret = ir.Return(self.get(castval), loc=self.loc) + self.current_block.append(ret) + +- if PYVERSION in ((3, 12), ): ++ if PYVERSION in ((3, 12), (3, 13)): + def op_RETURN_CONST(self, inst, retval, castval): + value = self.code_consts[inst.arg] + const = ir.Const(value, loc=self.loc) +@@ -2905,8 +2983,20 @@ class Interpreter(object): + else: + raise NotImplementedError(PYVERSION) + ++ if PYVERSION in ((3, 13),): ++ def op_TO_BOOL(self, inst, val, res): ++ self.store(self.get(val), res) # TODO: just a lazy hack ++ ++ elif PYVERSION in ((3, 10), (3, 11), (3, 12)): ++ pass ++ else: ++ raise NotImplementedError(PYVERSION) ++ + def op_COMPARE_OP(self, inst, lhs, rhs, res): +- if PYVERSION in ((3, 12), ): ++ if PYVERSION in ((3, 13),): ++ op = dis.cmp_op[inst.arg >> 5] ++ # TODO: fifth lowest bit now indicates a forced version to bool. ++ elif PYVERSION in ((3, 12),): + op = dis.cmp_op[inst.arg >> 4] + elif PYVERSION in ((3, 9), (3, 10), (3, 11)): + op = dis.cmp_op[inst.arg] +@@ -3024,7 +3114,7 @@ class Interpreter(object): + def op_POP_JUMP_FORWARD_IF_NOT_NONE(self, inst, pred): + self._jump_if_none(inst, pred, False) + +- if PYVERSION in ((3, 12), ): ++ if PYVERSION in ((3, 12), (3, 13)): + def op_POP_JUMP_IF_NONE(self, inst, pred): + self._jump_if_none(inst, pred, True) + +@@ -3152,7 +3242,7 @@ class Interpreter(object): + "Probably caused by complex control-flow constructs; " + "e.g. try-except" + ) +- raise errors.UnsupportedError(msg, loc=self.loc) ++ raise errors.UnsupportedBytecodeError(msg, loc=self.loc) + fcode = assume_code_const.value + if name: + name = self.get(name) +@@ -3166,14 +3256,14 @@ class Interpreter(object): + self.op_MAKE_FUNCTION(inst, name, code, closure, annotations, + kwdefaults, defaults, res) + +- if PYVERSION in ((3, 11), (3, 12)): ++ if PYVERSION in ((3, 11), (3, 12), (3, 13)): + def op_LOAD_CLOSURE(self, inst, res): + name = self.func_id.func.__code__._varname_from_oparg(inst.arg) + if name in self.code_cellvars: + try: + gl = self.get(name) + except NotDefinedError: +- msg = "Unsupported use of op_LOAD_CLOSURE encountered" ++ msg = "Unsupported use of cell variable encountered" + raise NotImplementedError(msg) + elif name in self.code_freevars: + idx = self.code_freevars.index(name) +@@ -3191,7 +3281,7 @@ class Interpreter(object): + try: + gl = self.get(name) + except NotDefinedError: +- msg = "Unsupported use of op_LOAD_CLOSURE encountered" ++ msg = "Unsupported use of cell variable encountered" + raise NotImplementedError(msg) + else: + idx = inst.arg - n_cellvars +@@ -3228,7 +3318,7 @@ class Interpreter(object): + "op_LIST_EXTEND at the start of a block.\n\nThis could be " + "due to the use of a branch in a tuple unpacking statement.") + if not self.current_block.body: +- raise errors.UnsupportedError(msg) ++ raise errors.UnsupportedBytecodeError(msg) + + # is last emitted statement a build_tuple? 
+ stmt = self.current_block.body[-1] +@@ -3258,7 +3348,7 @@ class Interpreter(object): + ok = False + break + if ok and build_empty_list is None: +- raise errors.UnsupportedError(msg) ++ raise errors.UnsupportedBytecodeError(msg) + if ok: + stmts = self.current_block.body + build_tuple_asgn = self.current_block.body[-1] +@@ -3304,7 +3394,7 @@ class Interpreter(object): + def op_CALL_METHOD(self, *args, **kws): + self.op_CALL_FUNCTION(*args, **kws) + +- if PYVERSION in ((3, 12), ): ++ if PYVERSION in ((3, 12), (3, 13)): + def op_CALL_INTRINSIC_1(self, inst, operand, **kwargs): + if operand == ci1op.INTRINSIC_STOPITERATION_ERROR: + stmt = ir.StaticRaise(INTRINSIC_STOPITERATION_ERROR, (), +@@ -3325,7 +3415,7 @@ class Interpreter(object): + raise NotImplementedError(PYVERSION) + + +-if PYVERSION in ((3, 12), ): ++if PYVERSION in ((3, 12), (3, 13)): + class INTRINSIC_STOPITERATION_ERROR(AssertionError): + pass + elif PYVERSION in ((3, 9), (3, 10), (3, 11)): +Index: numba-0.60.0/numba/cpython/unicode.py +=================================================================== +--- numba-0.60.0.orig/numba/cpython/unicode.py ++++ numba-0.60.0/numba/cpython/unicode.py +@@ -349,7 +349,7 @@ def _set_code_point(a, i, ch): + "Unexpected unicode representation in _set_code_point") + + +-if PYVERSION in ((3, 12),): ++if PYVERSION in ((3, 12), (3, 13)): + @register_jitable + def _pick_kind(kind1, kind2): + if kind1 == PY_UNICODE_1BYTE_KIND: +@@ -393,7 +393,7 @@ def _pick_ascii(is_ascii1, is_ascii2): + return types.uint32(0) + + +-if PYVERSION in ((3, 12),): ++if PYVERSION in ((3, 12), (3, 13)): + @register_jitable + def _kind_to_byte_width(kind): + if kind == PY_UNICODE_1BYTE_KIND: +@@ -2047,7 +2047,7 @@ def _is_upper(is_lower, is_upper, is_tit + def impl(a): + l = len(a) + if l == 1: +- return is_upper(_get_code_point(a, 0)) ++ return is_upper(_get_code_point(a, 0)) != 0 + if l == 0: + return False + cased = False +Index: numba-0.60.0/numba/experimental/jitclass/base.py +=================================================================== +--- numba-0.60.0.orig/numba/experimental/jitclass/base.py ++++ numba-0.60.0/numba/experimental/jitclass/base.py +@@ -282,6 +282,9 @@ def _drop_ignored_attrs(dct): + drop = set(['__weakref__', + '__module__', + '__dict__']) ++ if utils.PYVERSION == (3, 13): ++ # new in python 3.13 ++ drop |= set(['__firstlineno__', '__static_attributes__']) + + if '__annotations__' in dct: + drop.add('__annotations__') +@@ -300,7 +303,7 @@ def _drop_ignored_attrs(dct): + drop.add('__hash__') + + for k in drop: +- del dct[k] ++ dct.pop(k) + + + class ClassBuilder(object): +Index: numba-0.60.0/numba/core/compiler.py +=================================================================== +--- numba-0.60.0.orig/numba/core/compiler.py ++++ numba-0.60.0/numba/core/compiler.py +@@ -476,10 +476,7 @@ class CompilerBase(object): + res = e.result + break + except Exception as e: +- if (utils.use_new_style_errors() and not +- isinstance(e, errors.NumbaError)): +- raise e +- ++ utils.handle_new_style_errors(e) + self.state.status.fail_reason = e + if is_final_pipeline: + raise e +Index: numba-0.60.0/numba/core/compiler_machinery.py +=================================================================== +--- numba-0.60.0.orig/numba/core/compiler_machinery.py ++++ numba-0.60.0/numba/core/compiler_machinery.py +@@ -304,7 +304,8 @@ class PassManager(object): + args=str(internal_state.args), + return_type=str(internal_state.return_type), + ) +- with ev.trigger_event("numba:run_pass", data=ev_details): ++ errctx = 
errors.new_error_context(f"Pass {pss.name()}") ++ with ev.trigger_event("numba:run_pass", data=ev_details), errctx: + with SimpleTimer() as init_time: + mutated |= check(pss.run_initialization, internal_state) + with SimpleTimer() as pass_time: +@@ -359,9 +360,7 @@ class PassManager(object): + except _EarlyPipelineCompletion as e: + raise e + except Exception as e: +- if (utils.use_new_style_errors() and not +- isinstance(e, errors.NumbaError)): +- raise e ++ utils.handle_new_style_errors(e) + msg = "Failed in %s mode pipeline (step: %s)" % \ + (self.pipeline_name, pass_desc) + patched_exception = self._patch_error(msg, e) +Index: numba-0.60.0/numba/core/errors.py +=================================================================== +--- numba-0.60.0.orig/numba/core/errors.py ++++ numba-0.60.0/numba/core/errors.py +@@ -532,7 +532,6 @@ class WarningsFixer(object): + + + class NumbaError(Exception): +- + def __init__(self, msg, loc=None, highlighting=True): + self.msg = msg + self.loc = loc +@@ -578,7 +577,13 @@ class UnsupportedError(NumbaError): + """ + Numba does not have an implementation for this functionality. + """ +- pass ++ ++ ++class UnsupportedBytecodeError(Exception): ++ """Unsupported bytecode is non-recoverable ++ """ ++ def __init__(self, msg, loc=None): ++ super().__init__(f"{msg}. Raised from {loc}") + + + class UnsupportedRewriteError(UnsupportedError): +Index: numba-0.60.0/numba/core/types/functions.py +=================================================================== +--- numba-0.60.0.orig/numba/core/types/functions.py ++++ numba-0.60.0/numba/core/types/functions.py +@@ -307,12 +307,9 @@ class BaseFunction(Callable): + for k, v in kws.items()} + sig = temp.apply(nolitargs, nolitkws) + except Exception as e: +- if (utils.use_new_style_errors() and not +- isinstance(e, errors.NumbaError)): +- raise e +- else: +- sig = None +- failures.add_error(temp, False, e, uselit) ++ utils.handle_new_style_errors(e) ++ sig = None ++ failures.add_error(temp, False, e, uselit) + else: + if sig is not None: + self._impl_keys[sig.args] = temp.get_impl_key(sig) +Index: numba-0.60.0/numba/core/utils.py +=================================================================== +--- numba-0.60.0.orig/numba/core/utils.py ++++ numba-0.60.0/numba/core/utils.py +@@ -230,6 +230,17 @@ def use_old_style_errors(): + return res + + ++def handle_new_style_errors(e): ++ """Handle new_style error by raising the exception immediately if they are ++ non-recoverable. ++ """ ++ from numba.core import errors ++ ++ if use_new_style_errors(): ++ if not isinstance(e, errors.NumbaError): ++ raise e ++ ++ + class ThreadLocalStack: + """A TLS stack container. 
+ +Index: numba-0.60.0/numba/stencils/stencil.py +=================================================================== +--- numba-0.60.0.orig/numba/stencils/stencil.py ++++ numba-0.60.0/numba/stencils/stencil.py +@@ -402,8 +402,9 @@ class StencilFunc(object): + sig = signature(real_ret, *argtys_extra) + dummy_text = ("def __numba_dummy_stencil({}{}):\n pass\n".format( + ",".join(self.kernel_ir.arg_names), sig_extra)) +- exec(dummy_text) in globals(), locals() +- dummy_func = eval("__numba_dummy_stencil") ++ dct = {} ++ exec(dummy_text, dct) ++ dummy_func = dct["__numba_dummy_stencil"] + sig = sig.replace(pysig=utils.pysignature(dummy_func)) + self._targetctx.insert_func_defn([(self._lower_me, self, argtys_extra)]) + self._type_cache[argtys_extra] = (sig, result, typemap, calltypes) +@@ -659,8 +660,10 @@ class StencilFunc(object): + print(func_text) + + # Force the new stencil function into existence. +- exec(func_text) in globals(), locals() +- stencil_func = eval(stencil_func_name) ++ dct = {} ++ dct.update(globals()) ++ exec(func_text, dct) ++ stencil_func = dct[stencil_func_name] + if sigret is not None: + pysig = utils.pysignature(stencil_func) + sigret.pysig = pysig +Index: numba-0.60.0/numba/tests/test_debug.py +=================================================================== +--- numba-0.60.0.orig/numba/tests/test_debug.py ++++ numba-0.60.0/numba/tests/test_debug.py +@@ -73,7 +73,7 @@ class DebugTestBase(TestCase): + self.assert_fails(check_meth, out) + + def _check_dump_bytecode(self, out): +- if utils.PYVERSION in ((3, 11), (3, 12)): ++ if utils.PYVERSION in ((3, 11), (3, 12), (3, 13)): + self.assertIn('BINARY_OP', out) + elif utils.PYVERSION in ((3, 9), (3, 10)): + self.assertIn('BINARY_ADD', out) +Index: numba-0.60.0/numba/tests/test_ir_inlining.py +=================================================================== +--- numba-0.60.0.orig/numba/tests/test_ir_inlining.py ++++ numba-0.60.0/numba/tests/test_ir_inlining.py +@@ -444,7 +444,7 @@ class TestFunctionInlining(MemoryLeakMix + return bar(z + 2) + + # block count changes with Python version due to bytecode differences. 
+- if utils.PYVERSION in ((3, 12), ): ++ if utils.PYVERSION in ((3, 12), (3, 13)): + bc = 39 + elif utils.PYVERSION in ((3, 10), (3, 11)): + bc = 35 +Index: numba-0.60.0/numba/tests/test_closure.py +=================================================================== +--- numba-0.60.0.orig/numba/tests/test_closure.py ++++ numba-0.60.0/numba/tests/test_closure.py +@@ -400,13 +400,13 @@ class TestInlinedClosure(TestCase): + with self.assertRaises(NotImplementedError) as raises: + cfunc = jit(nopython=True)(outer3) + cfunc(var) +- msg = "Unsupported use of op_LOAD_CLOSURE encountered" ++ msg = "Unsupported use of cell variable encountered" + self.assertIn(msg, str(raises.exception)) + + with self.assertRaises(NotImplementedError) as raises: + cfunc = jit(nopython=True)(outer4) + cfunc(var) +- msg = "Unsupported use of op_LOAD_CLOSURE encountered" ++ msg = "Unsupported use of cell variable encountered" + self.assertIn(msg, str(raises.exception)) + + with self.assertRaises(TypingError) as raises: +Index: numba-0.60.0/numba/core/inline_closurecall.py +=================================================================== +--- numba-0.60.0.orig/numba/core/inline_closurecall.py ++++ numba-0.60.0/numba/core/inline_closurecall.py +@@ -95,7 +95,7 @@ class InlineClosureCallPass(object): + modified = False + work_list = list(self.func_ir.blocks.items()) + debug_print = _make_debug_print("InlineClosureCallPass") +- debug_print("START") ++ debug_print(f"START {self.func_ir.func_id.func_qualname}") + while work_list: + _label, block = work_list.pop() + for i, instr in enumerate(block.body): +Index: numba-0.60.0/numba/pycc/modulemixin.c +=================================================================== +--- numba-0.60.0.orig/numba/pycc/modulemixin.c ++++ numba-0.60.0/numba/pycc/modulemixin.c +@@ -23,6 +23,12 @@ + #include "../core/runtime/nrt.h" + #endif + ++#if (PY_MAJOR_VERSION == 3) && (PY_MINOR_VERSION >= 12) ++ #define Py_BUILD_CORE 1 ++ #include "internal/pycore_pyhash.h" ++ #undef Py_BUILD_CORE ++#endif ++ + /* Defines hashsecret variables (see issue #6386) */ + int64_t _numba_hashsecret_siphash_k0; + int64_t _numba_hashsecret_siphash_k1; +Index: numba-0.60.0/numba/tests/test_operators.py +=================================================================== +--- numba-0.60.0.orig/numba/tests/test_operators.py ++++ numba-0.60.0/numba/tests/test_operators.py +@@ -768,7 +768,7 @@ class TestOperators(TestCase): + # error message depends on Python version. 
+ if utils.PYVERSION in ((3, 9),): + msg = "can't mod complex numbers" +- elif utils.PYVERSION in ((3, 10), (3, 11), (3, 12)): ++ elif utils.PYVERSION in ((3, 10), (3, 11), (3, 12), (3, 13)): + msg = "unsupported operand type(s) for %" + else: + raise NotImplementedError(utils.PYVERSION) +Index: numba-0.60.0/numba/tests/test_parfors.py +=================================================================== +--- numba-0.60.0.orig/numba/tests/test_parfors.py ++++ numba-0.60.0/numba/tests/test_parfors.py +@@ -3550,7 +3550,7 @@ class TestPrangeBase(TestParforsBase): + prange_names.append('prange') + prange_names = tuple(prange_names) + prange_idx = len(prange_names) - 1 +- if utils.PYVERSION in ((3, 11), (3, 12)): ++ if utils.PYVERSION in ((3, 11), (3, 12), (3, 13)): + # this is the inverse of _fix_LOAD_GLOBAL_arg + prange_idx = 1 + (prange_idx << 1) + elif utils.PYVERSION in ((3, 9), (3, 10)): +Index: numba-0.60.0/numba/tests/support.py +=================================================================== +--- numba-0.60.0.orig/numba/tests/support.py ++++ numba-0.60.0/numba/tests/support.py +@@ -128,6 +128,12 @@ def expected_failure_np2(fn): + else: + return fn + ++def expected_failure_py313(fn): ++ if utils.PYVERSION == (3, 13): ++ return unittest.expectedFailure(fn) ++ else: ++ return fn ++ + _msg = "SciPy needed for test" + skip_unless_scipy = unittest.skipIf(scipy is None, _msg) + +Index: numba-0.60.0/numba/tests/test_np_functions.py +=================================================================== +--- numba-0.60.0.orig/numba/tests/test_np_functions.py ++++ numba-0.60.0/numba/tests/test_np_functions.py +@@ -6192,8 +6192,9 @@ def foo(): + tystr = ty.__name__ + basestr = basefunc.__name__ + funcstr = self.template % (tystr, basestr) +- eval(compile(funcstr, '', 'exec')) +- return locals()['foo'] ++ dct = {} ++ exec(compile(funcstr, '', 'exec'), globals(), dct) ++ return dct['foo'] + + @unittest.skipIf(numpy_version >= (1, 24), "NumPy < 1.24 required") + def test_MachAr(self): +Index: numba-0.60.0/numba/tests/test_unicode.py +=================================================================== +--- numba-0.60.0.orig/numba/tests/test_unicode.py ++++ numba-0.60.0/numba/tests/test_unicode.py +@@ -6,10 +6,12 @@ from numba import njit, typeof + from numba.core import types + import unittest + from numba.tests.support import (TestCase, no_pyobj_flags, MemoryLeakMixin) +-from numba.core.errors import TypingError, UnsupportedError ++from numba.core.errors import (TypingError, UnsupportedError, ++ UnsupportedBytecodeError) + from numba.cpython.unicode import _MAX_UNICODE + from numba.core.types.functions import _header_lead + from numba.extending import overload ++from numba.core.utils import PYVERSION + + + def isascii(s): +@@ -2697,10 +2699,17 @@ class TestUnicodeAuxillary(BaseTest): + self.assertEqual(got, expected) + + # check error when format spec provided +- with self.assertRaises(UnsupportedError) as raises: ++ unsupported_errors = (UnsupportedError, UnsupportedBytecodeError) ++ with self.assertRaises(unsupported_errors) as raises: + njit(impl4)(["A", "B"]) +- msg = "format spec in f-strings not supported yet" +- self.assertIn(msg, str(raises.exception)) ++ if PYVERSION in ((3, 13),): ++ msg = "Use of unsupported opcode (FORMAT_WITH_SPEC)" ++ self.assertIn(msg, str(raises.exception)) ++ elif PYVERSION in ((3, 10), (3, 11), (3, 12)): ++ msg = "format spec in f-strings not supported yet" ++ self.assertIn(msg, str(raises.exception)) ++ else: ++ raise NotImplementedError(PYVERSION) + 
self.assertEqual(impl5(), njit(impl5)())
+
+
+Index: numba-0.60.0/numba/core/ir.py
+===================================================================
+--- numba-0.60.0.orig/numba/core/ir.py
++++ numba-0.60.0/numba/core/ir.py
+@@ -90,9 +90,12 @@ class Loc(object):
+
+ def get_lines(self):
+ if self.lines is None:
+-
+- self.lines = linecache.getlines(self._get_path())
+-
++ path = self._get_path()
++ # Avoid reading from dynamic strings. They are most likely
++ # overridden. Problem started with Python 3.13. "<string>" seems
++ # to be something from multiprocessing.
++ lns = [] if path == "<string>" else linecache.getlines(path)
++ self.lines = lns
+ return self.lines
+
+ def _get_path(self):
+@@ -1496,7 +1499,7 @@ class FunctionIR(object):
+ self.block_entry_vars = {}
+
+ def derive(self, blocks, arg_count=None, arg_names=None,
+- force_non_generator=False):
++ force_non_generator=False, loc=None):
+ """
+ Derive a new function IR from this one, using the given blocks,
+ and possibly modifying the argument count and generator flag.
+@@ -1507,7 +1510,7 @@ class FunctionIR(object):
+
+ new_ir = copy.copy(self)
+ new_ir.blocks = blocks
+- new_ir.loc = firstblock.loc
++ new_ir.loc = firstblock.loc if loc is None else loc
+ if force_non_generator:
+ new_ir.is_generator = False
+ if arg_count is not None:
+Index: numba-0.60.0/numba/core/transforms.py
+===================================================================
+--- numba-0.60.0.orig/numba/core/transforms.py
++++ numba-0.60.0/numba/core/transforms.py
+@@ -191,12 +191,20 @@ def _loop_lift_modify_blocks(func_ir, lo
+ loopblocks = dict((k, blocks[k].copy()) for k in loopblockkeys)
+ # Modify the loop blocks
+ _loop_lift_prepare_loop_func(loopinfo, loopblocks)
+-
++ # Since Python 3.13, the [END_FOR, POP_TOP] sequence becomes the start of the
++ # block, causing the block to have the line number of the start of the
++ # previous loop. Fix this using the loc of the first getiter.
++ getiter_exprs = [] ++ for blk in loopblocks.values(): ++ getiter_exprs.extend(blk.find_exprs(op="getiter")) ++ first_getiter = min(getiter_exprs, key=lambda x: x.loc.line) ++ loop_loc = first_getiter.loc + # Create a new IR for the lifted loop + lifted_ir = func_ir.derive(blocks=loopblocks, + arg_names=tuple(loopinfo.inputs), + arg_count=len(loopinfo.inputs), +- force_non_generator=True) ++ force_non_generator=True, ++ loc=loop_loc) + liftedloop = LiftedLoop(lifted_ir, + typingctx, targetctx, flags, locals) + +Index: numba-0.60.0/numba/tests/test_exceptions.py +=================================================================== +--- numba-0.60.0.orig/numba/tests/test_exceptions.py ++++ numba-0.60.0/numba/tests/test_exceptions.py +@@ -6,6 +6,7 @@ from numba import jit, njit + from numba.core import types, errors, utils + from numba.tests.support import (TestCase, expected_failure_py311, + expected_failure_py312, ++ expected_failure_py313, + ) + import unittest + +@@ -440,6 +441,7 @@ class TestRaising(TestCase): + + @expected_failure_py311 + @expected_failure_py312 ++ @expected_failure_py313 + def test_dynamic_raise(self): + + @njit +Index: numba-0.60.0/numba/tests/test_try_except.py +=================================================================== +--- numba-0.60.0.orig/numba/tests/test_try_except.py ++++ numba-0.60.0/numba/tests/test_try_except.py +@@ -8,12 +8,13 @@ from numba import njit, typed, objmode, + from numba.core.utils import PYVERSION + from numba.core import ir_utils, ir + from numba.core.errors import ( +- UnsupportedError, CompilerError, NumbaPerformanceWarning, TypingError, ++ CompilerError, NumbaPerformanceWarning, TypingError, ++ UnsupportedBytecodeError, + ) + from numba.tests.support import ( + TestCase, unittest, captured_stdout, MemoryLeakMixin, + skip_parfors_unsupported, skip_unless_scipy, expected_failure_py311, +- expected_failure_py312 ++ expected_failure_py312, expected_failure_py313, + ) + + +@@ -372,7 +373,7 @@ class TestTryBareExcept(TestCase): + except: # noqa: E722 + raise + +- with self.assertRaises(UnsupportedError) as raises: ++ with self.assertRaises(UnsupportedBytecodeError) as raises: + udt() + self.assertIn( + "The re-raising of an exception is not yet supported.", +@@ -459,7 +460,7 @@ class TestTryExceptCaught(TestCase): + return r + return r + +- with self.assertRaises(UnsupportedError) as raises: ++ with self.assertRaises(UnsupportedBytecodeError) as raises: + udt(True) + self.assertIn( + "Exception object cannot be stored into variable (e)", +@@ -474,7 +475,7 @@ class TestTryExceptCaught(TestCase): + except Exception: + raise + +- with self.assertRaises(UnsupportedError) as raises: ++ with self.assertRaises(UnsupportedBytecodeError) as raises: + udt() + self.assertIn( + "The re-raising of an exception is not yet supported.", +@@ -492,7 +493,7 @@ class TestTryExceptCaught(TestCase): + except Exception: + raise + +- with self.assertRaises(UnsupportedError) as raises: ++ with self.assertRaises(UnsupportedBytecodeError) as raises: + udt() + self.assertIn( + "The re-raising of an exception is not yet supported.", +@@ -692,6 +693,7 @@ class TestTryExceptOtherControlFlow(Test + + @expected_failure_py311 + @expected_failure_py312 ++ @expected_failure_py313 + def test_objmode(self): + @njit + def udt(): +@@ -712,6 +714,7 @@ class TestTryExceptOtherControlFlow(Test + + @expected_failure_py311 + @expected_failure_py312 ++ @expected_failure_py313 + def test_objmode_output_type(self): + def bar(x): + return np.asarray(list(reversed(x.tolist()))) +Index: 
numba-0.60.0/numba/tests/test_withlifting.py +=================================================================== +--- numba-0.60.0.orig/numba/tests/test_withlifting.py ++++ numba-0.60.0/numba/tests/test_withlifting.py +@@ -16,7 +16,8 @@ from numba.tests.support import (MemoryL + skip_unless_scipy, linux_only, + strace_supported, strace, + expected_failure_py311, +- expected_failure_py312) ++ expected_failure_py312, ++ expected_failure_py313) + from numba.core.utils import PYVERSION + from numba.experimental import jitclass + import unittest +@@ -280,6 +281,7 @@ class TestLiftCall(BaseTestWithLifting): + + @expected_failure_py311 + @expected_failure_py312 ++ @expected_failure_py313 + def test_liftcall5(self): + self.check_extracted_with(liftcall5, expect_count=1, + expected_stdout="0\n1\n2\n3\n4\n5\nA\n") +@@ -719,6 +721,7 @@ class TestLiftObj(MemoryLeak, TestCase): + + @expected_failure_py311 + @expected_failure_py312 ++ @expected_failure_py313 + def test_case19_recursion(self): + def foo(x): + with objmode_context(): +@@ -1169,7 +1172,7 @@ class TestBogusContext(BaseTestWithLifti + with open('') as f: + pass + +- with self.assertRaises(errors.UnsupportedError) as raises: ++ with self.assertRaises(errors.UnsupportedBytecodeError) as raises: + foo() + + excstr = str(raises.exception) +Index: numba-0.60.0/numba/tests/test_sys_monitoring.py +=================================================================== +--- numba-0.60.0.orig/numba/tests/test_sys_monitoring.py ++++ numba-0.60.0/numba/tests/test_sys_monitoring.py +@@ -4,7 +4,7 @@ import sys + import threading + import unittest + from unittest.mock import Mock, call +-from numba.tests.support import TestCase, skip_unless_py312 ++from numba.tests.support import TestCase + from numba import jit, objmode + from numba.core.utils import PYVERSION + from numba.core.serialize import _numba_unpickle +@@ -21,7 +21,7 @@ def generate_usecase(): + return foo, call_foo + + +-if PYVERSION == (3, 12): ++if PYVERSION in ((3, 12), (3, 13)): + PY_START = sys.monitoring.events.PY_START + PY_RETURN = sys.monitoring.events.PY_RETURN + RAISE = sys.monitoring.events.RAISE +@@ -36,7 +36,7 @@ TOOL2MONITORTYPE = {0 : "Debugger", + 5 : "Optimizer"} + + +-@skip_unless_py312 ++@unittest.skipUnless(PYVERSION >= (3, 12), "needs Python 3.12+") + class TestMonitoring(TestCase): + # Tests the interaction of the Numba dispatcher with `sys.monitoring`. + # +@@ -724,7 +724,7 @@ class TestMonitoring(TestCase): + self.assertFalse(q2.qsize()) + + +-@skip_unless_py312 ++@unittest.skipUnless(PYVERSION >= (3, 12), "needs Python 3.12+") + class TestMonitoringSelfTest(TestCase): + + def test_skipping_of_tests_if_monitoring_in_use(self): +Index: numba-0.60.0/numba/_random.c +=================================================================== +--- numba-0.60.0.orig/numba/_random.c ++++ numba-0.60.0/numba/_random.c +@@ -195,7 +195,7 @@ rnd_implicit_init(rnd_state_t *state) + Py_buffer buf; + PyGILState_STATE gilstate = PyGILState_Ensure(); + +- module = PyImport_ImportModuleNoBlock("os"); ++ module = PyImport_ImportModule("os"); + if (module == NULL) + goto error; + /* Read as many bytes as necessary to get the full entropy +Index: numba-0.60.0/numba/core/pythonapi.py +=================================================================== +--- numba-0.60.0.orig/numba/core/pythonapi.py ++++ numba-0.60.0/numba/core/pythonapi.py +@@ -919,9 +919,9 @@ class PythonAPI(object): + # Other APIs (organize them better!) 
+ # + +- def import_module_noblock(self, modname): ++ def import_module(self, modname): + fnty = ir.FunctionType(self.pyobj, [self.cstring]) +- fn = self._get_function(fnty, name="PyImport_ImportModuleNoBlock") ++ fn = self._get_function(fnty, name="PyImport_ImportModule") + return self.builder.call(fn, [modname]) + + def call_function_objargs(self, callee, objargs): +Index: numba-0.60.0/numba/experimental/function_type.py +=================================================================== +--- numba-0.60.0.orig/numba/experimental/function_type.py ++++ numba-0.60.0/numba/experimental/function_type.py +@@ -181,7 +181,7 @@ def lower_get_wrapper_address(context, b + # caller. + + modname = context.insert_const_string(builder.module, __name__) +- numba_mod = pyapi.import_module_noblock(modname) ++ numba_mod = pyapi.import_module(modname) + numba_func = pyapi.object_getattr_string( + numba_mod, '_get_wrapper_address') + pyapi.decref(numba_mod) +@@ -263,3 +263,4 @@ def lower_cast_dispatcher_to_function_ty + llty = context.get_value_type(types.voidptr) + sfunc.pyaddr = builder.ptrtoint(val, llty) + return sfunc._getvalue() ++ +Index: numba-0.60.0/numba/typed/typeddict.py +=================================================================== +--- numba-0.60.0.orig/numba/typed/typeddict.py ++++ numba-0.60.0/numba/typed/typeddict.py +@@ -266,7 +266,7 @@ def box_dicttype(typ, val, c): + modname = c.context.insert_const_string( + c.builder.module, 'numba.typed.typeddict', + ) +- typeddict_mod = c.pyapi.import_module_noblock(modname) ++ typeddict_mod = c.pyapi.import_module(modname) + fmp_fn = c.pyapi.object_getattr_string(typeddict_mod, '_from_meminfo_ptr') + + dicttype_obj = c.pyapi.unserialize(c.pyapi.serialize_object(typ)) +Index: numba-0.60.0/numba/typed/typedlist.py +=================================================================== +--- numba-0.60.0.orig/numba/typed/typedlist.py ++++ numba-0.60.0/numba/typed/typedlist.py +@@ -471,7 +471,7 @@ def box_lsttype(typ, val, c): + modname = c.context.insert_const_string( + c.builder.module, 'numba.typed.typedlist', + ) +- typedlist_mod = c.pyapi.import_module_noblock(modname) ++ typedlist_mod = c.pyapi.import_module(modname) + fmp_fn = c.pyapi.object_getattr_string(typedlist_mod, '_from_meminfo_ptr') + + lsttype_obj = c.pyapi.unserialize(c.pyapi.serialize_object(typ)) +Index: numba-0.60.0/numba/tests/test_interpreter.py +=================================================================== +--- numba-0.60.0.orig/numba/tests/test_interpreter.py ++++ numba-0.60.0/numba/tests/test_interpreter.py +@@ -5,7 +5,7 @@ import unittest + from numba import jit, njit, objmode, typeof, literally + from numba.extending import overload + from numba.core import types +-from numba.core.errors import UnsupportedError ++from numba.core.errors import UnsupportedBytecodeError + from numba.tests.support import ( + TestCase, + MemoryLeakMixin, +@@ -388,7 +388,7 @@ class TestCallFunctionExPeepHole(MemoryL + arg41=1, + ) + +- with self.assertRaises(UnsupportedError) as raises: ++ with self.assertRaises(UnsupportedBytecodeError) as raises: + njit()(inline_func)(False) + self.assertIn( + 'You can resolve this issue by moving the control flow out', +@@ -498,7 +498,7 @@ class TestCallFunctionExPeepHole(MemoryL + 1, + ) + +- with self.assertRaises(UnsupportedError) as raises: ++ with self.assertRaises(UnsupportedBytecodeError) as raises: + njit()(inline_func)(False) + self.assertIn( + 'You can resolve this issue by moving the control flow out', +@@ -585,7 +585,7 @@ class 
TestCallFunctionExPeepHole(MemoryL + arg15=1 if flag else 2, + ) + +- with self.assertRaises(UnsupportedError) as raises: ++ with self.assertRaises(UnsupportedBytecodeError) as raises: + njit()(inline_func)(False) + self.assertIn( + 'You can resolve this issue by moving the control flow out', +@@ -973,7 +973,7 @@ class TestLargeConstDict(TestCase, Memor + } + return d["S"] + +- with self.assertRaises(UnsupportedError) as raises: ++ with self.assertRaises(UnsupportedBytecodeError) as raises: + njit()(inline_func)("a_string", False) + self.assertIn( + 'You can resolve this issue by moving the control flow out', +Index: numba-0.60.0/numba/tests/test_tuples.py +=================================================================== +--- numba-0.60.0.orig/numba/tests/test_tuples.py ++++ numba-0.60.0/numba/tests/test_tuples.py +@@ -731,7 +731,7 @@ class TestTupleBuild(TestCase): + b = (3,2, 4) + return (*(b if a[0] else (5, 6)),) + +- with self.assertRaises(errors.UnsupportedError) as raises: ++ with self.assertRaises(errors.UnsupportedBytecodeError) as raises: + foo() + msg = "op_LIST_EXTEND at the start of a block" + self.assertIn(msg, str(raises.exception)) +Index: numba-0.60.0/setup.py +=================================================================== +--- numba-0.60.0.orig/setup.py ++++ numba-0.60.0/setup.py +@@ -20,7 +20,7 @@ except ImportError: + + + min_python_version = "3.9" +-max_python_version = "3.13" # exclusive ++max_python_version = "3.14" # exclusive + min_numpy_build_version = "2.0.0rc1" + min_numpy_run_version = "1.22" + max_numpy_run_version = "2.1" +Index: numba-0.60.0/numba/core/boxing.py +=================================================================== +--- numba-0.60.0.orig/numba/core/boxing.py ++++ numba-0.60.0/numba/core/boxing.py +@@ -655,7 +655,7 @@ class _NumbaTypeHelper(object): + def __enter__(self): + c = self.c + numba_name = c.context.insert_const_string(c.builder.module, 'numba') +- numba_mod = c.pyapi.import_module_noblock(numba_name) ++ numba_mod = c.pyapi.import_module(numba_name) + typeof_fn = c.pyapi.object_getattr_string(numba_mod, 'typeof') + self.typeof_fn = typeof_fn + c.pyapi.decref(numba_mod) +@@ -1213,7 +1213,7 @@ def unbox_numpy_random_bitgenerator(typ, + # store the results. + # First find ctypes.cast, and ctypes.c_void_p + ctypes_name = c.context.insert_const_string(c.builder.module, 'ctypes') +- ctypes_module = c.pyapi.import_module_noblock(ctypes_name) ++ ctypes_module = c.pyapi.import_module(ctypes_name) + extra_refs.append(ctypes_module) + with cgutils.early_exit_if_null(c.builder, stack, ctypes_module): + handle_failure() +Index: numba-0.60.0/numba/pythoncapi_compat.h +=================================================================== +--- /dev/null ++++ numba-0.60.0/numba/pythoncapi_compat.h +@@ -0,0 +1,1696 @@ ++// Header file providing new C API functions to old Python versions. ++// ++// File distributed under the Zero Clause BSD (0BSD) license. ++// Copyright Contributors to the pythoncapi_compat project. 
++//
++// Homepage:
++// https://github.com/python/pythoncapi_compat
++//
++// Latest version:
++// https://raw.githubusercontent.com/python/pythoncapi-compat/0041177c4f348c8952b4c8980b2c90856e61c7c7/pythoncapi_compat.h
++//
++// SPDX-License-Identifier: 0BSD
++
++#ifndef PYTHONCAPI_COMPAT
++#define PYTHONCAPI_COMPAT
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++#include <Python.h>
++
++// Python 3.11.0b4 added PyFrame_Back() to Python.h
++#if PY_VERSION_HEX < 0x030b00B4 && !defined(PYPY_VERSION)
++# include "frameobject.h" // PyFrameObject, PyFrame_GetBack()
++#endif
++
++
++#ifndef _Py_CAST
++# define _Py_CAST(type, expr) ((type)(expr))
++#endif
++
++// Static inline functions should use _Py_NULL rather than using directly NULL
++// to prevent C++ compiler warnings. On C23 and newer and on C++11 and newer,
++// _Py_NULL is defined as nullptr.
++#if (defined (__STDC_VERSION__) && __STDC_VERSION__ > 201710L) \
++ || (defined(__cplusplus) && __cplusplus >= 201103)
++# define _Py_NULL nullptr
++#else
++# define _Py_NULL NULL
++#endif
++
++// Cast argument to PyObject* type.
++#ifndef _PyObject_CAST
++# define _PyObject_CAST(op) _Py_CAST(PyObject*, op)
++#endif
++
++#ifndef Py_BUILD_ASSERT
++# define Py_BUILD_ASSERT(cond) \
++ do { \
++ (void)sizeof(char [1 - 2 * !(cond)]); \
++ } while(0)
++#endif
++
++
++// bpo-42262 added Py_NewRef() to Python 3.10.0a3
++#if PY_VERSION_HEX < 0x030A00A3 && !defined(Py_NewRef)
++static inline PyObject* _Py_NewRef(PyObject *obj)
++{
++ Py_INCREF(obj);
++ return obj;
++}
++#define Py_NewRef(obj) _Py_NewRef(_PyObject_CAST(obj))
++#endif
++
++
++// bpo-42262 added Py_XNewRef() to Python 3.10.0a3
++#if PY_VERSION_HEX < 0x030A00A3 && !defined(Py_XNewRef)
++static inline PyObject* _Py_XNewRef(PyObject *obj)
++{
++ Py_XINCREF(obj);
++ return obj;
++}
++#define Py_XNewRef(obj) _Py_XNewRef(_PyObject_CAST(obj))
++#endif
++
++
++// bpo-39573 added Py_SET_REFCNT() to Python 3.9.0a4
++#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_REFCNT)
++static inline void _Py_SET_REFCNT(PyObject *ob, Py_ssize_t refcnt)
++{
++ ob->ob_refcnt = refcnt;
++}
++#define Py_SET_REFCNT(ob, refcnt) _Py_SET_REFCNT(_PyObject_CAST(ob), refcnt)
++#endif
++
++
++// Py_SETREF() and Py_XSETREF() were added to Python 3.5.2.
++// It is excluded from the limited C API.
++#if (PY_VERSION_HEX < 0x03050200 && !defined(Py_SETREF)) && !defined(Py_LIMITED_API)
++#define Py_SETREF(dst, src) \
++ do { \
++ PyObject **_tmp_dst_ptr = _Py_CAST(PyObject**, &(dst)); \
++ PyObject *_tmp_dst = (*_tmp_dst_ptr); \
++ *_tmp_dst_ptr = _PyObject_CAST(src); \
++ Py_DECREF(_tmp_dst); \
++ } while (0)
++
++#define Py_XSETREF(dst, src) \
++ do { \
++ PyObject **_tmp_dst_ptr = _Py_CAST(PyObject**, &(dst)); \
++ PyObject *_tmp_dst = (*_tmp_dst_ptr); \
++ *_tmp_dst_ptr = _PyObject_CAST(src); \
++ Py_XDECREF(_tmp_dst); \
++ } while (0)
++#endif
++
++
++// bpo-43753 added Py_Is(), Py_IsNone(), Py_IsTrue() and Py_IsFalse()
++// to Python 3.10.0b1.
++#if PY_VERSION_HEX < 0x030A00B1 && !defined(Py_Is) ++# define Py_Is(x, y) ((x) == (y)) ++#endif ++#if PY_VERSION_HEX < 0x030A00B1 && !defined(Py_IsNone) ++# define Py_IsNone(x) Py_Is(x, Py_None) ++#endif ++#if (PY_VERSION_HEX < 0x030A00B1 || defined(PYPY_VERSION)) && !defined(Py_IsTrue) ++# define Py_IsTrue(x) Py_Is(x, Py_True) ++#endif ++#if (PY_VERSION_HEX < 0x030A00B1 || defined(PYPY_VERSION)) && !defined(Py_IsFalse) ++# define Py_IsFalse(x) Py_Is(x, Py_False) ++#endif ++ ++ ++// bpo-39573 added Py_SET_TYPE() to Python 3.9.0a4 ++#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_TYPE) ++static inline void _Py_SET_TYPE(PyObject *ob, PyTypeObject *type) ++{ ++ ob->ob_type = type; ++} ++#define Py_SET_TYPE(ob, type) _Py_SET_TYPE(_PyObject_CAST(ob), type) ++#endif ++ ++ ++// bpo-39573 added Py_SET_SIZE() to Python 3.9.0a4 ++#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_SIZE) ++static inline void _Py_SET_SIZE(PyVarObject *ob, Py_ssize_t size) ++{ ++ ob->ob_size = size; ++} ++#define Py_SET_SIZE(ob, size) _Py_SET_SIZE((PyVarObject*)(ob), size) ++#endif ++ ++ ++// bpo-40421 added PyFrame_GetCode() to Python 3.9.0b1 ++#if PY_VERSION_HEX < 0x030900B1 || defined(PYPY_VERSION) ++static inline PyCodeObject* PyFrame_GetCode(PyFrameObject *frame) ++{ ++ assert(frame != _Py_NULL); ++ assert(frame->f_code != _Py_NULL); ++ return _Py_CAST(PyCodeObject*, Py_NewRef(frame->f_code)); ++} ++#endif ++ ++static inline PyCodeObject* _PyFrame_GetCodeBorrow(PyFrameObject *frame) ++{ ++ PyCodeObject *code = PyFrame_GetCode(frame); ++ Py_DECREF(code); ++ return code; ++} ++ ++ ++// bpo-40421 added PyFrame_GetBack() to Python 3.9.0b1 ++#if PY_VERSION_HEX < 0x030900B1 && !defined(PYPY_VERSION) ++static inline PyFrameObject* PyFrame_GetBack(PyFrameObject *frame) ++{ ++ assert(frame != _Py_NULL); ++ return _Py_CAST(PyFrameObject*, Py_XNewRef(frame->f_back)); ++} ++#endif ++ ++#if !defined(PYPY_VERSION) ++static inline PyFrameObject* _PyFrame_GetBackBorrow(PyFrameObject *frame) ++{ ++ PyFrameObject *back = PyFrame_GetBack(frame); ++ Py_XDECREF(back); ++ return back; ++} ++#endif ++ ++ ++// bpo-40421 added PyFrame_GetLocals() to Python 3.11.0a7 ++#if PY_VERSION_HEX < 0x030B00A7 && !defined(PYPY_VERSION) ++static inline PyObject* PyFrame_GetLocals(PyFrameObject *frame) ++{ ++#if PY_VERSION_HEX >= 0x030400B1 ++ if (PyFrame_FastToLocalsWithError(frame) < 0) { ++ return NULL; ++ } ++#else ++ PyFrame_FastToLocals(frame); ++#endif ++ return Py_NewRef(frame->f_locals); ++} ++#endif ++ ++ ++// bpo-40421 added PyFrame_GetGlobals() to Python 3.11.0a7 ++#if PY_VERSION_HEX < 0x030B00A7 && !defined(PYPY_VERSION) ++static inline PyObject* PyFrame_GetGlobals(PyFrameObject *frame) ++{ ++ return Py_NewRef(frame->f_globals); ++} ++#endif ++ ++ ++// bpo-40421 added PyFrame_GetBuiltins() to Python 3.11.0a7 ++#if PY_VERSION_HEX < 0x030B00A7 && !defined(PYPY_VERSION) ++static inline PyObject* PyFrame_GetBuiltins(PyFrameObject *frame) ++{ ++ return Py_NewRef(frame->f_builtins); ++} ++#endif ++ ++ ++// bpo-40421 added PyFrame_GetLasti() to Python 3.11.0b1 ++#if PY_VERSION_HEX < 0x030B00B1 && !defined(PYPY_VERSION) ++static inline int PyFrame_GetLasti(PyFrameObject *frame) ++{ ++#if PY_VERSION_HEX >= 0x030A00A7 ++ // bpo-27129: Since Python 3.10.0a7, f_lasti is an instruction offset, ++ // not a bytes offset anymore. Python uses 16-bit "wordcode" (2 bytes) ++ // instructions. 
++ if (frame->f_lasti < 0) { ++ return -1; ++ } ++ return frame->f_lasti * 2; ++#else ++ return frame->f_lasti; ++#endif ++} ++#endif ++ ++ ++// gh-91248 added PyFrame_GetVar() to Python 3.12.0a2 ++#if PY_VERSION_HEX < 0x030C00A2 && !defined(PYPY_VERSION) ++static inline PyObject* PyFrame_GetVar(PyFrameObject *frame, PyObject *name) ++{ ++ PyObject *locals, *value; ++ ++ locals = PyFrame_GetLocals(frame); ++ if (locals == NULL) { ++ return NULL; ++ } ++#if PY_VERSION_HEX >= 0x03000000 ++ value = PyDict_GetItemWithError(locals, name); ++#else ++ value = _PyDict_GetItemWithError(locals, name); ++#endif ++ Py_DECREF(locals); ++ ++ if (value == NULL) { ++ if (PyErr_Occurred()) { ++ return NULL; ++ } ++#if PY_VERSION_HEX >= 0x03000000 ++ PyErr_Format(PyExc_NameError, "variable %R does not exist", name); ++#else ++ PyErr_SetString(PyExc_NameError, "variable does not exist"); ++#endif ++ return NULL; ++ } ++ return Py_NewRef(value); ++} ++#endif ++ ++ ++// gh-91248 added PyFrame_GetVarString() to Python 3.12.0a2 ++#if PY_VERSION_HEX < 0x030C00A2 && !defined(PYPY_VERSION) ++static inline PyObject* ++PyFrame_GetVarString(PyFrameObject *frame, const char *name) ++{ ++ PyObject *name_obj, *value; ++#if PY_VERSION_HEX >= 0x03000000 ++ name_obj = PyUnicode_FromString(name); ++#else ++ name_obj = PyString_FromString(name); ++#endif ++ if (name_obj == NULL) { ++ return NULL; ++ } ++ value = PyFrame_GetVar(frame, name_obj); ++ Py_DECREF(name_obj); ++ return value; ++} ++#endif ++ ++ ++// bpo-39947 added PyThreadState_GetInterpreter() to Python 3.9.0a5 ++#if PY_VERSION_HEX < 0x030900A5 || defined(PYPY_VERSION) ++static inline PyInterpreterState * ++PyThreadState_GetInterpreter(PyThreadState *tstate) ++{ ++ assert(tstate != _Py_NULL); ++ return tstate->interp; ++} ++#endif ++ ++ ++// bpo-40429 added PyThreadState_GetFrame() to Python 3.9.0b1 ++#if PY_VERSION_HEX < 0x030900B1 && !defined(PYPY_VERSION) ++static inline PyFrameObject* PyThreadState_GetFrame(PyThreadState *tstate) ++{ ++ assert(tstate != _Py_NULL); ++ return _Py_CAST(PyFrameObject *, Py_XNewRef(tstate->frame)); ++} ++#endif ++ ++#if !defined(PYPY_VERSION) ++static inline PyFrameObject* ++_PyThreadState_GetFrameBorrow(PyThreadState *tstate) ++{ ++ PyFrameObject *frame = PyThreadState_GetFrame(tstate); ++ Py_XDECREF(frame); ++ return frame; ++} ++#endif ++ ++ ++// bpo-39947 added PyInterpreterState_Get() to Python 3.9.0a5 ++#if PY_VERSION_HEX < 0x030900A5 || defined(PYPY_VERSION) ++static inline PyInterpreterState* PyInterpreterState_Get(void) ++{ ++ PyThreadState *tstate; ++ PyInterpreterState *interp; ++ ++ tstate = PyThreadState_GET(); ++ if (tstate == _Py_NULL) { ++ Py_FatalError("GIL released (tstate is NULL)"); ++ } ++ interp = tstate->interp; ++ if (interp == _Py_NULL) { ++ Py_FatalError("no current interpreter"); ++ } ++ return interp; ++} ++#endif ++ ++ ++// bpo-39947 added PyInterpreterState_Get() to Python 3.9.0a6 ++#if 0x030700A1 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x030900A6 && !defined(PYPY_VERSION) ++static inline uint64_t PyThreadState_GetID(PyThreadState *tstate) ++{ ++ assert(tstate != _Py_NULL); ++ return tstate->id; ++} ++#endif ++ ++// bpo-43760 added PyThreadState_EnterTracing() to Python 3.11.0a2 ++#if PY_VERSION_HEX < 0x030B00A2 && !defined(PYPY_VERSION) ++static inline void PyThreadState_EnterTracing(PyThreadState *tstate) ++{ ++ tstate->tracing++; ++#if PY_VERSION_HEX >= 0x030A00A1 ++ tstate->cframe->use_tracing = 0; ++#else ++ tstate->use_tracing = 0; ++#endif ++} ++#endif ++ ++// bpo-43760 added 
PyThreadState_LeaveTracing() to Python 3.11.0a2 ++#if PY_VERSION_HEX < 0x030B00A2 && !defined(PYPY_VERSION) ++static inline void PyThreadState_LeaveTracing(PyThreadState *tstate) ++{ ++ int use_tracing = (tstate->c_tracefunc != _Py_NULL ++ || tstate->c_profilefunc != _Py_NULL); ++ tstate->tracing--; ++#if PY_VERSION_HEX >= 0x030A00A1 ++ tstate->cframe->use_tracing = use_tracing; ++#else ++ tstate->use_tracing = use_tracing; ++#endif ++} ++#endif ++ ++ ++// bpo-37194 added PyObject_CallNoArgs() to Python 3.9.0a1 ++// PyObject_CallNoArgs() added to PyPy 3.9.16-v7.3.11 ++#if !defined(PyObject_CallNoArgs) && PY_VERSION_HEX < 0x030900A1 ++static inline PyObject* PyObject_CallNoArgs(PyObject *func) ++{ ++ return PyObject_CallFunctionObjArgs(func, NULL); ++} ++#endif ++ ++ ++// bpo-39245 made PyObject_CallOneArg() public (previously called ++// _PyObject_CallOneArg) in Python 3.9.0a4 ++// PyObject_CallOneArg() added to PyPy 3.9.16-v7.3.11 ++#if !defined(PyObject_CallOneArg) && PY_VERSION_HEX < 0x030900A4 ++static inline PyObject* PyObject_CallOneArg(PyObject *func, PyObject *arg) ++{ ++ return PyObject_CallFunctionObjArgs(func, arg, NULL); ++} ++#endif ++ ++ ++// bpo-1635741 added PyModule_AddObjectRef() to Python 3.10.0a3 ++#if PY_VERSION_HEX < 0x030A00A3 ++static inline int ++PyModule_AddObjectRef(PyObject *module, const char *name, PyObject *value) ++{ ++ int res; ++ ++ if (!value && !PyErr_Occurred()) { ++ // PyModule_AddObject() raises TypeError in this case ++ PyErr_SetString(PyExc_SystemError, ++ "PyModule_AddObjectRef() must be called " ++ "with an exception raised if value is NULL"); ++ return -1; ++ } ++ ++ Py_XINCREF(value); ++ res = PyModule_AddObject(module, name, value); ++ if (res < 0) { ++ Py_XDECREF(value); ++ } ++ return res; ++} ++#endif ++ ++ ++// bpo-40024 added PyModule_AddType() to Python 3.9.0a5 ++#if PY_VERSION_HEX < 0x030900A5 ++static inline int PyModule_AddType(PyObject *module, PyTypeObject *type) ++{ ++ const char *name, *dot; ++ ++ if (PyType_Ready(type) < 0) { ++ return -1; ++ } ++ ++ // inline _PyType_Name() ++ name = type->tp_name; ++ assert(name != _Py_NULL); ++ dot = strrchr(name, '.'); ++ if (dot != _Py_NULL) { ++ name = dot + 1; ++ } ++ ++ return PyModule_AddObjectRef(module, name, _PyObject_CAST(type)); ++} ++#endif ++ ++ ++// bpo-40241 added PyObject_GC_IsTracked() to Python 3.9.0a6. ++// bpo-4688 added _PyObject_GC_IS_TRACKED() to Python 2.7.0a2. ++#if PY_VERSION_HEX < 0x030900A6 && !defined(PYPY_VERSION) ++static inline int PyObject_GC_IsTracked(PyObject* obj) ++{ ++ return (PyObject_IS_GC(obj) && _PyObject_GC_IS_TRACKED(obj)); ++} ++#endif ++ ++// bpo-40241 added PyObject_GC_IsFinalized() to Python 3.9.0a6. ++// bpo-18112 added _PyGCHead_FINALIZED() to Python 3.4.0 final. ++#if PY_VERSION_HEX < 0x030900A6 && PY_VERSION_HEX >= 0x030400F0 && !defined(PYPY_VERSION) ++static inline int PyObject_GC_IsFinalized(PyObject *obj) ++{ ++ PyGC_Head *gc = _Py_CAST(PyGC_Head*, obj) - 1; ++ return (PyObject_IS_GC(obj) && _PyGCHead_FINALIZED(gc)); ++} ++#endif ++ ++ ++// bpo-39573 added Py_IS_TYPE() to Python 3.9.0a4 ++#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_IS_TYPE) ++static inline int _Py_IS_TYPE(PyObject *ob, PyTypeObject *type) { ++ return Py_TYPE(ob) == type; ++} ++#define Py_IS_TYPE(ob, type) _Py_IS_TYPE(_PyObject_CAST(ob), type) ++#endif ++ ++ ++// bpo-46906 added PyFloat_Pack2() and PyFloat_Unpack2() to Python 3.11a7. ++// bpo-11734 added _PyFloat_Pack2() and _PyFloat_Unpack2() to Python 3.6.0b1. 
++// Python 3.11a2 moved _PyFloat_Pack2() and _PyFloat_Unpack2() to the internal ++// C API: Python 3.11a2-3.11a6 versions are not supported. ++#if 0x030600B1 <= PY_VERSION_HEX && PY_VERSION_HEX <= 0x030B00A1 && !defined(PYPY_VERSION) ++static inline int PyFloat_Pack2(double x, char *p, int le) ++{ return _PyFloat_Pack2(x, (unsigned char*)p, le); } ++ ++static inline double PyFloat_Unpack2(const char *p, int le) ++{ return _PyFloat_Unpack2((const unsigned char *)p, le); } ++#endif ++ ++ ++// bpo-46906 added PyFloat_Pack4(), PyFloat_Pack8(), PyFloat_Unpack4() and ++// PyFloat_Unpack8() to Python 3.11a7. ++// Python 3.11a2 moved _PyFloat_Pack4(), _PyFloat_Pack8(), _PyFloat_Unpack4() ++// and _PyFloat_Unpack8() to the internal C API: Python 3.11a2-3.11a6 versions ++// are not supported. ++#if PY_VERSION_HEX <= 0x030B00A1 && !defined(PYPY_VERSION) ++static inline int PyFloat_Pack4(double x, char *p, int le) ++{ return _PyFloat_Pack4(x, (unsigned char*)p, le); } ++ ++static inline int PyFloat_Pack8(double x, char *p, int le) ++{ return _PyFloat_Pack8(x, (unsigned char*)p, le); } ++ ++static inline double PyFloat_Unpack4(const char *p, int le) ++{ return _PyFloat_Unpack4((const unsigned char *)p, le); } ++ ++static inline double PyFloat_Unpack8(const char *p, int le) ++{ return _PyFloat_Unpack8((const unsigned char *)p, le); } ++#endif ++ ++ ++// gh-92154 added PyCode_GetCode() to Python 3.11.0b1 ++#if PY_VERSION_HEX < 0x030B00B1 && !defined(PYPY_VERSION) ++static inline PyObject* PyCode_GetCode(PyCodeObject *code) ++{ ++ return Py_NewRef(code->co_code); ++} ++#endif ++ ++ ++// gh-95008 added PyCode_GetVarnames() to Python 3.11.0rc1 ++#if PY_VERSION_HEX < 0x030B00C1 && !defined(PYPY_VERSION) ++static inline PyObject* PyCode_GetVarnames(PyCodeObject *code) ++{ ++ return Py_NewRef(code->co_varnames); ++} ++#endif ++ ++// gh-95008 added PyCode_GetFreevars() to Python 3.11.0rc1 ++#if PY_VERSION_HEX < 0x030B00C1 && !defined(PYPY_VERSION) ++static inline PyObject* PyCode_GetFreevars(PyCodeObject *code) ++{ ++ return Py_NewRef(code->co_freevars); ++} ++#endif ++ ++// gh-95008 added PyCode_GetCellvars() to Python 3.11.0rc1 ++#if PY_VERSION_HEX < 0x030B00C1 && !defined(PYPY_VERSION) ++static inline PyObject* PyCode_GetCellvars(PyCodeObject *code) ++{ ++ return Py_NewRef(code->co_cellvars); ++} ++#endif ++ ++ ++// Py_UNUSED() was added to Python 3.4.0b2. 
++#if PY_VERSION_HEX < 0x030400B2 && !defined(Py_UNUSED) ++# if defined(__GNUC__) || defined(__clang__) ++# define Py_UNUSED(name) _unused_ ## name __attribute__((unused)) ++# else ++# define Py_UNUSED(name) _unused_ ## name ++# endif ++#endif ++ ++ ++// gh-105922 added PyImport_AddModuleRef() to Python 3.13.0a1 ++#if PY_VERSION_HEX < 0x030D00A0 ++static inline PyObject* PyImport_AddModuleRef(const char *name) ++{ ++ return Py_XNewRef(PyImport_AddModule(name)); ++} ++#endif ++ ++ ++// gh-105927 added PyWeakref_GetRef() to Python 3.13.0a1 ++#if PY_VERSION_HEX < 0x030D0000 ++static inline int PyWeakref_GetRef(PyObject *ref, PyObject **pobj) ++{ ++ PyObject *obj; ++ if (ref != NULL && !PyWeakref_Check(ref)) { ++ *pobj = NULL; ++ PyErr_SetString(PyExc_TypeError, "expected a weakref"); ++ return -1; ++ } ++ obj = PyWeakref_GetObject(ref); ++ if (obj == NULL) { ++ // SystemError if ref is NULL ++ *pobj = NULL; ++ return -1; ++ } ++ if (obj == Py_None) { ++ *pobj = NULL; ++ return 0; ++ } ++ *pobj = Py_NewRef(obj); ++ return (*pobj != NULL); ++} ++#endif ++ ++ ++// bpo-36974 added PY_VECTORCALL_ARGUMENTS_OFFSET to Python 3.8b1 ++#ifndef PY_VECTORCALL_ARGUMENTS_OFFSET ++# define PY_VECTORCALL_ARGUMENTS_OFFSET (_Py_CAST(size_t, 1) << (8 * sizeof(size_t) - 1)) ++#endif ++ ++// bpo-36974 added PyVectorcall_NARGS() to Python 3.8b1 ++#if PY_VERSION_HEX < 0x030800B1 ++static inline Py_ssize_t PyVectorcall_NARGS(size_t n) ++{ ++ return n & ~PY_VECTORCALL_ARGUMENTS_OFFSET; ++} ++#endif ++ ++ ++// gh-105922 added PyObject_Vectorcall() to Python 3.9.0a4 ++#if PY_VERSION_HEX < 0x030900A4 ++static inline PyObject* ++PyObject_Vectorcall(PyObject *callable, PyObject *const *args, ++ size_t nargsf, PyObject *kwnames) ++{ ++#if PY_VERSION_HEX >= 0x030800B1 && !defined(PYPY_VERSION) ++ // bpo-36974 added _PyObject_Vectorcall() to Python 3.8.0b1 ++ return _PyObject_Vectorcall(callable, args, nargsf, kwnames); ++#else ++ PyObject *posargs = NULL, *kwargs = NULL; ++ PyObject *res; ++ Py_ssize_t nposargs, nkwargs, i; ++ ++ if (nargsf != 0 && args == NULL) { ++ PyErr_BadInternalCall(); ++ goto error; ++ } ++ if (kwnames != NULL && !PyTuple_Check(kwnames)) { ++ PyErr_BadInternalCall(); ++ goto error; ++ } ++ ++ nposargs = (Py_ssize_t)PyVectorcall_NARGS(nargsf); ++ if (kwnames) { ++ nkwargs = PyTuple_GET_SIZE(kwnames); ++ } ++ else { ++ nkwargs = 0; ++ } ++ ++ posargs = PyTuple_New(nposargs); ++ if (posargs == NULL) { ++ goto error; ++ } ++ if (nposargs) { ++ for (i=0; i < nposargs; i++) { ++ PyTuple_SET_ITEM(posargs, i, Py_NewRef(*args)); ++ args++; ++ } ++ } ++ ++ if (nkwargs) { ++ kwargs = PyDict_New(); ++ if (kwargs == NULL) { ++ goto error; ++ } ++ ++ for (i = 0; i < nkwargs; i++) { ++ PyObject *key = PyTuple_GET_ITEM(kwnames, i); ++ PyObject *value = *args; ++ args++; ++ if (PyDict_SetItem(kwargs, key, value) < 0) { ++ goto error; ++ } ++ } ++ } ++ else { ++ kwargs = NULL; ++ } ++ ++ res = PyObject_Call(callable, posargs, kwargs); ++ Py_DECREF(posargs); ++ Py_XDECREF(kwargs); ++ return res; ++ ++error: ++ Py_DECREF(posargs); ++ Py_XDECREF(kwargs); ++ return NULL; ++#endif ++} ++#endif ++ ++ ++// gh-106521 added PyObject_GetOptionalAttr() and ++// PyObject_GetOptionalAttrString() to Python 3.13.0a1 ++#if PY_VERSION_HEX < 0x030D00A1 ++static inline int ++PyObject_GetOptionalAttr(PyObject *obj, PyObject *attr_name, PyObject **result) ++{ ++ // bpo-32571 added _PyObject_LookupAttr() to Python 3.7.0b1 ++#if PY_VERSION_HEX >= 0x030700B1 && !defined(PYPY_VERSION) ++ return _PyObject_LookupAttr(obj, attr_name, result); 
++#else ++ *result = PyObject_GetAttr(obj, attr_name); ++ if (*result != NULL) { ++ return 1; ++ } ++ if (!PyErr_Occurred()) { ++ return 0; ++ } ++ if (PyErr_ExceptionMatches(PyExc_AttributeError)) { ++ PyErr_Clear(); ++ return 0; ++ } ++ return -1; ++#endif ++} ++ ++static inline int ++PyObject_GetOptionalAttrString(PyObject *obj, const char *attr_name, PyObject **result) ++{ ++ PyObject *name_obj; ++ int rc; ++#if PY_VERSION_HEX >= 0x03000000 ++ name_obj = PyUnicode_FromString(attr_name); ++#else ++ name_obj = PyString_FromString(attr_name); ++#endif ++ if (name_obj == NULL) { ++ *result = NULL; ++ return -1; ++ } ++ rc = PyObject_GetOptionalAttr(obj, name_obj, result); ++ Py_DECREF(name_obj); ++ return rc; ++} ++#endif ++ ++ ++// gh-106307 added PyObject_GetOptionalAttr() and ++// PyMapping_GetOptionalItemString() to Python 3.13.0a1 ++#if PY_VERSION_HEX < 0x030D00A1 ++static inline int ++PyMapping_GetOptionalItem(PyObject *obj, PyObject *key, PyObject **result) ++{ ++ *result = PyObject_GetItem(obj, key); ++ if (*result) { ++ return 1; ++ } ++ if (!PyErr_ExceptionMatches(PyExc_KeyError)) { ++ return -1; ++ } ++ PyErr_Clear(); ++ return 0; ++} ++ ++static inline int ++PyMapping_GetOptionalItemString(PyObject *obj, const char *key, PyObject **result) ++{ ++ PyObject *key_obj; ++ int rc; ++#if PY_VERSION_HEX >= 0x03000000 ++ key_obj = PyUnicode_FromString(key); ++#else ++ key_obj = PyString_FromString(key); ++#endif ++ if (key_obj == NULL) { ++ *result = NULL; ++ return -1; ++ } ++ rc = PyMapping_GetOptionalItem(obj, key_obj, result); ++ Py_DECREF(key_obj); ++ return rc; ++} ++#endif ++ ++// gh-108511 added PyMapping_HasKeyWithError() and ++// PyMapping_HasKeyStringWithError() to Python 3.13.0a1 ++#if PY_VERSION_HEX < 0x030D00A1 ++static inline int ++PyMapping_HasKeyWithError(PyObject *obj, PyObject *key) ++{ ++ PyObject *res; ++ int rc = PyMapping_GetOptionalItem(obj, key, &res); ++ Py_XDECREF(res); ++ return rc; ++} ++ ++static inline int ++PyMapping_HasKeyStringWithError(PyObject *obj, const char *key) ++{ ++ PyObject *res; ++ int rc = PyMapping_GetOptionalItemString(obj, key, &res); ++ Py_XDECREF(res); ++ return rc; ++} ++#endif ++ ++ ++// gh-108511 added PyObject_HasAttrWithError() and ++// PyObject_HasAttrStringWithError() to Python 3.13.0a1 ++#if PY_VERSION_HEX < 0x030D00A1 ++static inline int ++PyObject_HasAttrWithError(PyObject *obj, PyObject *attr) ++{ ++ PyObject *res; ++ int rc = PyObject_GetOptionalAttr(obj, attr, &res); ++ Py_XDECREF(res); ++ return rc; ++} ++ ++static inline int ++PyObject_HasAttrStringWithError(PyObject *obj, const char *attr) ++{ ++ PyObject *res; ++ int rc = PyObject_GetOptionalAttrString(obj, attr, &res); ++ Py_XDECREF(res); ++ return rc; ++} ++#endif ++ ++ ++// gh-106004 added PyDict_GetItemRef() and PyDict_GetItemStringRef() ++// to Python 3.13.0a1 ++#if PY_VERSION_HEX < 0x030D00A1 ++static inline int ++PyDict_GetItemRef(PyObject *mp, PyObject *key, PyObject **result) ++{ ++#if PY_VERSION_HEX >= 0x03000000 ++ PyObject *item = PyDict_GetItemWithError(mp, key); ++#else ++ PyObject *item = _PyDict_GetItemWithError(mp, key); ++#endif ++ if (item != NULL) { ++ *result = Py_NewRef(item); ++ return 1; // found ++ } ++ if (!PyErr_Occurred()) { ++ *result = NULL; ++ return 0; // not found ++ } ++ *result = NULL; ++ return -1; ++} ++ ++static inline int ++PyDict_GetItemStringRef(PyObject *mp, const char *key, PyObject **result) ++{ ++ int res; ++#if PY_VERSION_HEX >= 0x03000000 ++ PyObject *key_obj = PyUnicode_FromString(key); ++#else ++ PyObject *key_obj = 
PyString_FromString(key); ++#endif ++ if (key_obj == NULL) { ++ *result = NULL; ++ return -1; ++ } ++ res = PyDict_GetItemRef(mp, key_obj, result); ++ Py_DECREF(key_obj); ++ return res; ++} ++#endif ++ ++ ++// gh-106307 added PyModule_Add() to Python 3.13.0a1 ++#if PY_VERSION_HEX < 0x030D00A1 ++static inline int ++PyModule_Add(PyObject *mod, const char *name, PyObject *value) ++{ ++ int res = PyModule_AddObjectRef(mod, name, value); ++ Py_XDECREF(value); ++ return res; ++} ++#endif ++ ++ ++// gh-108014 added Py_IsFinalizing() to Python 3.13.0a1 ++// bpo-1856 added _Py_Finalizing to Python 3.2.1b1. ++// _Py_IsFinalizing() was added to PyPy 7.3.0. ++#if (0x030201B1 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x030D00A1) \ ++ && (!defined(PYPY_VERSION_NUM) || PYPY_VERSION_NUM >= 0x7030000) ++static inline int Py_IsFinalizing(void) ++{ ++#if PY_VERSION_HEX >= 0x030700A1 ++ // _Py_IsFinalizing() was added to Python 3.7.0a1. ++ return _Py_IsFinalizing(); ++#else ++ return (_Py_Finalizing != NULL); ++#endif ++} ++#endif ++ ++ ++// gh-108323 added PyDict_ContainsString() to Python 3.13.0a1 ++#if PY_VERSION_HEX < 0x030D00A1 ++static inline int PyDict_ContainsString(PyObject *op, const char *key) ++{ ++ PyObject *key_obj = PyUnicode_FromString(key); ++ if (key_obj == NULL) { ++ return -1; ++ } ++ int res = PyDict_Contains(op, key_obj); ++ Py_DECREF(key_obj); ++ return res; ++} ++#endif ++ ++ ++// gh-108445 added PyLong_AsInt() to Python 3.13.0a1 ++#if PY_VERSION_HEX < 0x030D00A1 ++static inline int PyLong_AsInt(PyObject *obj) ++{ ++#ifdef PYPY_VERSION ++ long value = PyLong_AsLong(obj); ++ if (value == -1 && PyErr_Occurred()) { ++ return -1; ++ } ++ if (value < (long)INT_MIN || (long)INT_MAX < value) { ++ PyErr_SetString(PyExc_OverflowError, ++ "Python int too large to convert to C int"); ++ return -1; ++ } ++ return (int)value; ++#else ++ return _PyLong_AsInt(obj); ++#endif ++} ++#endif ++ ++ ++// gh-107073 added PyObject_VisitManagedDict() to Python 3.13.0a1 ++#if PY_VERSION_HEX < 0x030D00A1 ++static inline int ++PyObject_VisitManagedDict(PyObject *obj, visitproc visit, void *arg) ++{ ++ PyObject **dict = _PyObject_GetDictPtr(obj); ++ if (*dict == NULL) { ++ return -1; ++ } ++ Py_VISIT(*dict); ++ return 0; ++} ++ ++static inline void ++PyObject_ClearManagedDict(PyObject *obj) ++{ ++ PyObject **dict = _PyObject_GetDictPtr(obj); ++ if (*dict == NULL) { ++ return; ++ } ++ Py_CLEAR(*dict); ++} ++#endif ++ ++// gh-108867 added PyThreadState_GetUnchecked() to Python 3.13.0a1 ++// Python 3.5.2 added _PyThreadState_UncheckedGet(). ++#if PY_VERSION_HEX >= 0x03050200 && PY_VERSION_HEX < 0x030D00A1 ++static inline PyThreadState* ++PyThreadState_GetUnchecked(void) ++{ ++ return _PyThreadState_UncheckedGet(); ++} ++#endif ++ ++// gh-110289 added PyUnicode_EqualToUTF8() and PyUnicode_EqualToUTF8AndSize() ++// to Python 3.13.0a1 ++#if PY_VERSION_HEX < 0x030D00A1 ++static inline int ++PyUnicode_EqualToUTF8AndSize(PyObject *unicode, const char *str, Py_ssize_t str_len) ++{ ++ Py_ssize_t len; ++ const void *utf8; ++ PyObject *exc_type, *exc_value, *exc_tb; ++ int res; ++ ++ // API cannot report errors so save/restore the exception ++ PyErr_Fetch(&exc_type, &exc_value, &exc_tb); ++ ++ // Python 3.3.0a1 added PyUnicode_AsUTF8AndSize() ++#if PY_VERSION_HEX >= 0x030300A1 ++ if (PyUnicode_IS_ASCII(unicode)) { ++ utf8 = PyUnicode_DATA(unicode); ++ len = PyUnicode_GET_LENGTH(unicode); ++ } ++ else { ++ utf8 = PyUnicode_AsUTF8AndSize(unicode, &len); ++ if (utf8 == NULL) { ++ // Memory allocation failure. 
The API cannot report error, ++ // so ignore the exception and return 0. ++ res = 0; ++ goto done; ++ } ++ } ++ ++ if (len != str_len) { ++ res = 0; ++ goto done; ++ } ++ res = (memcmp(utf8, str, (size_t)len) == 0); ++#else ++ PyObject *bytes = PyUnicode_AsUTF8String(unicode); ++ if (bytes == NULL) { ++ // Memory allocation failure. The API cannot report error, ++ // so ignore the exception and return 0. ++ res = 0; ++ goto done; ++ } ++ ++#if PY_VERSION_HEX >= 0x03000000 ++ len = PyBytes_GET_SIZE(bytes); ++ utf8 = PyBytes_AS_STRING(bytes); ++#else ++ len = PyString_GET_SIZE(bytes); ++ utf8 = PyString_AS_STRING(bytes); ++#endif ++ if (len != str_len) { ++ Py_DECREF(bytes); ++ res = 0; ++ goto done; ++ } ++ ++ res = (memcmp(utf8, str, (size_t)len) == 0); ++ Py_DECREF(bytes); ++#endif ++ ++done: ++ PyErr_Restore(exc_type, exc_value, exc_tb); ++ return res; ++} ++ ++static inline int ++PyUnicode_EqualToUTF8(PyObject *unicode, const char *str) ++{ ++ return PyUnicode_EqualToUTF8AndSize(unicode, str, (Py_ssize_t)strlen(str)); ++} ++#endif ++ ++ ++// gh-111138 added PyList_Extend() and PyList_Clear() to Python 3.13.0a2 ++#if PY_VERSION_HEX < 0x030D00A2 ++static inline int ++PyList_Extend(PyObject *list, PyObject *iterable) ++{ ++ return PyList_SetSlice(list, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, iterable); ++} ++ ++static inline int ++PyList_Clear(PyObject *list) ++{ ++ return PyList_SetSlice(list, 0, PY_SSIZE_T_MAX, NULL); ++} ++#endif ++ ++// gh-111262 added PyDict_Pop() and PyDict_PopString() to Python 3.13.0a2 ++#if PY_VERSION_HEX < 0x030D00A2 ++static inline int ++PyDict_Pop(PyObject *dict, PyObject *key, PyObject **result) ++{ ++ PyObject *value; ++ ++ if (!PyDict_Check(dict)) { ++ PyErr_BadInternalCall(); ++ if (result) { ++ *result = NULL; ++ } ++ return -1; ++ } ++ ++ // bpo-16991 added _PyDict_Pop() to Python 3.5.0b2. ++ // Python 3.6.0b3 changed _PyDict_Pop() first argument type to PyObject*. ++ // Python 3.13.0a1 removed _PyDict_Pop(). ++#if defined(PYPY_VERSION) || PY_VERSION_HEX < 0x030500b2 || PY_VERSION_HEX >= 0x030D0000 ++ value = PyObject_CallMethod(dict, "pop", "O", key); ++#elif PY_VERSION_HEX < 0x030600b3 ++ value = _PyDict_Pop(_Py_CAST(PyDictObject*, dict), key, NULL); ++#else ++ value = _PyDict_Pop(dict, key, NULL); ++#endif ++ if (value == NULL) { ++ if (result) { ++ *result = NULL; ++ } ++ if (PyErr_Occurred() && !PyErr_ExceptionMatches(PyExc_KeyError)) { ++ return -1; ++ } ++ PyErr_Clear(); ++ return 0; ++ } ++ if (result) { ++ *result = value; ++ } ++ else { ++ Py_DECREF(value); ++ } ++ return 1; ++} ++ ++static inline int ++PyDict_PopString(PyObject *dict, const char *key, PyObject **result) ++{ ++ PyObject *key_obj = PyUnicode_FromString(key); ++ if (key_obj == NULL) { ++ if (result != NULL) { ++ *result = NULL; ++ } ++ return -1; ++ } ++ ++ int res = PyDict_Pop(dict, key_obj, result); ++ Py_DECREF(key_obj); ++ return res; ++} ++#endif ++ ++ ++#if PY_VERSION_HEX < 0x030200A4 ++// Python 3.2.0a4 added Py_hash_t type ++typedef Py_ssize_t Py_hash_t; ++#endif ++ ++ ++// gh-111545 added Py_HashPointer() to Python 3.13.0a3 ++#if PY_VERSION_HEX < 0x030D00A3 ++static inline Py_hash_t Py_HashPointer(const void *ptr) ++{ ++#if PY_VERSION_HEX >= 0x030900A4 && !defined(PYPY_VERSION) ++ return _Py_HashPointer(ptr); ++#else ++ return _Py_HashPointer(_Py_CAST(void*, ptr)); ++#endif ++} ++#endif ++ ++ ++// Python 3.13a4 added a PyTime API. ++// Use the private API added to Python 3.5. 
++#if PY_VERSION_HEX < 0x030D00A4 && PY_VERSION_HEX >= 0x03050000 ++typedef _PyTime_t PyTime_t; ++#define PyTime_MIN _PyTime_MIN ++#define PyTime_MAX _PyTime_MAX ++ ++static inline double PyTime_AsSecondsDouble(PyTime_t t) ++{ return _PyTime_AsSecondsDouble(t); } ++ ++static inline int PyTime_Monotonic(PyTime_t *result) ++{ return _PyTime_GetMonotonicClockWithInfo(result, NULL); } ++ ++static inline int PyTime_Time(PyTime_t *result) ++{ return _PyTime_GetSystemClockWithInfo(result, NULL); } ++ ++static inline int PyTime_PerfCounter(PyTime_t *result) ++{ ++#if PY_VERSION_HEX >= 0x03070000 && !defined(PYPY_VERSION) ++ return _PyTime_GetPerfCounterWithInfo(result, NULL); ++#elif PY_VERSION_HEX >= 0x03070000 ++ // Call time.perf_counter_ns() and convert Python int object to PyTime_t. ++ // Cache time.perf_counter_ns() function for best performance. ++ static PyObject *func = NULL; ++ if (func == NULL) { ++ PyObject *mod = PyImport_ImportModule("time"); ++ if (mod == NULL) { ++ return -1; ++ } ++ ++ func = PyObject_GetAttrString(mod, "perf_counter_ns"); ++ Py_DECREF(mod); ++ if (func == NULL) { ++ return -1; ++ } ++ } ++ ++ PyObject *res = PyObject_CallNoArgs(func); ++ if (res == NULL) { ++ return -1; ++ } ++ long long value = PyLong_AsLongLong(res); ++ Py_DECREF(res); ++ ++ if (value == -1 && PyErr_Occurred()) { ++ return -1; ++ } ++ ++ Py_BUILD_ASSERT(sizeof(value) >= sizeof(PyTime_t)); ++ *result = (PyTime_t)value; ++ return 0; ++#else ++ // Call time.perf_counter() and convert C double to PyTime_t. ++ // Cache time.perf_counter() function for best performance. ++ static PyObject *func = NULL; ++ if (func == NULL) { ++ PyObject *mod = PyImport_ImportModule("time"); ++ if (mod == NULL) { ++ return -1; ++ } ++ ++ func = PyObject_GetAttrString(mod, "perf_counter"); ++ Py_DECREF(mod); ++ if (func == NULL) { ++ return -1; ++ } ++ } ++ ++ PyObject *res = PyObject_CallNoArgs(func); ++ if (res == NULL) { ++ return -1; ++ } ++ double d = PyFloat_AsDouble(res); ++ Py_DECREF(res); ++ ++ if (d == -1.0 && PyErr_Occurred()) { ++ return -1; ++ } ++ ++ // Avoid floor() to avoid having to link to libm ++ *result = (PyTime_t)(d * 1e9); ++ return 0; ++#endif ++} ++ ++#endif ++ ++// gh-111389 added hash constants to Python 3.13.0a5. These constants were ++// added first as private macros to Python 3.4.0b1 and PyPy 7.3.9. 
++#if (!defined(PyHASH_BITS) \ ++ && ((!defined(PYPY_VERSION) && PY_VERSION_HEX >= 0x030400B1) \ ++ || (defined(PYPY_VERSION) && PY_VERSION_HEX >= 0x03070000 \ ++ && PYPY_VERSION_NUM >= 0x07090000))) ++# define PyHASH_BITS _PyHASH_BITS ++# define PyHASH_MODULUS _PyHASH_MODULUS ++# define PyHASH_INF _PyHASH_INF ++# define PyHASH_IMAG _PyHASH_IMAG ++#endif ++ ++ ++// gh-111545 added Py_GetConstant() and Py_GetConstantBorrowed() ++// to Python 3.13.0a6 ++#if PY_VERSION_HEX < 0x030D00A6 && !defined(Py_CONSTANT_NONE) ++ ++#define Py_CONSTANT_NONE 0 ++#define Py_CONSTANT_FALSE 1 ++#define Py_CONSTANT_TRUE 2 ++#define Py_CONSTANT_ELLIPSIS 3 ++#define Py_CONSTANT_NOT_IMPLEMENTED 4 ++#define Py_CONSTANT_ZERO 5 ++#define Py_CONSTANT_ONE 6 ++#define Py_CONSTANT_EMPTY_STR 7 ++#define Py_CONSTANT_EMPTY_BYTES 8 ++#define Py_CONSTANT_EMPTY_TUPLE 9 ++ ++static inline PyObject* Py_GetConstant(unsigned int constant_id) ++{ ++ static PyObject* constants[Py_CONSTANT_EMPTY_TUPLE + 1] = {NULL}; ++ ++ if (constants[Py_CONSTANT_NONE] == NULL) { ++ constants[Py_CONSTANT_NONE] = Py_None; ++ constants[Py_CONSTANT_FALSE] = Py_False; ++ constants[Py_CONSTANT_TRUE] = Py_True; ++ constants[Py_CONSTANT_ELLIPSIS] = Py_Ellipsis; ++ constants[Py_CONSTANT_NOT_IMPLEMENTED] = Py_NotImplemented; ++ ++ constants[Py_CONSTANT_ZERO] = PyLong_FromLong(0); ++ if (constants[Py_CONSTANT_ZERO] == NULL) { ++ goto fatal_error; ++ } ++ ++ constants[Py_CONSTANT_ONE] = PyLong_FromLong(1); ++ if (constants[Py_CONSTANT_ONE] == NULL) { ++ goto fatal_error; ++ } ++ ++ constants[Py_CONSTANT_EMPTY_STR] = PyUnicode_FromStringAndSize("", 0); ++ if (constants[Py_CONSTANT_EMPTY_STR] == NULL) { ++ goto fatal_error; ++ } ++ ++ constants[Py_CONSTANT_EMPTY_BYTES] = PyBytes_FromStringAndSize("", 0); ++ if (constants[Py_CONSTANT_EMPTY_BYTES] == NULL) { ++ goto fatal_error; ++ } ++ ++ constants[Py_CONSTANT_EMPTY_TUPLE] = PyTuple_New(0); ++ if (constants[Py_CONSTANT_EMPTY_TUPLE] == NULL) { ++ goto fatal_error; ++ } ++ // goto dance to avoid compiler warnings about Py_FatalError() ++ goto init_done; ++ ++fatal_error: ++ // This case should never happen ++ Py_FatalError("Py_GetConstant() failed to get constants"); ++ } ++ ++init_done: ++ if (constant_id <= Py_CONSTANT_EMPTY_TUPLE) { ++ return Py_NewRef(constants[constant_id]); ++ } ++ else { ++ PyErr_BadInternalCall(); ++ return NULL; ++ } ++} ++ ++static inline PyObject* Py_GetConstantBorrowed(unsigned int constant_id) ++{ ++ PyObject *obj = Py_GetConstant(constant_id); ++ Py_XDECREF(obj); ++ return obj; ++} ++#endif ++ ++ ++// gh-114329 added PyList_GetItemRef() to Python 3.13.0a4 ++#if PY_VERSION_HEX < 0x030D00A4 ++static inline PyObject * ++PyList_GetItemRef(PyObject *op, Py_ssize_t index) ++{ ++ PyObject *item = PyList_GetItem(op, index); ++ Py_XINCREF(item); ++ return item; ++} ++#endif ++ ++ ++// gh-114329 added PyList_GetItemRef() to Python 3.13.0a4 ++#if PY_VERSION_HEX < 0x030D00A4 ++static inline int ++PyDict_SetDefaultRef(PyObject *d, PyObject *key, PyObject *default_value, ++ PyObject **result) ++{ ++ PyObject *value; ++ if (PyDict_GetItemRef(d, key, &value) < 0) { ++ // get error ++ if (result) { ++ *result = NULL; ++ } ++ return -1; ++ } ++ if (value != NULL) { ++ // present ++ if (result) { ++ *result = value; ++ } ++ else { ++ Py_DECREF(value); ++ } ++ return 1; ++ } ++ ++ // missing: set the item ++ if (PyDict_SetItem(d, key, default_value) < 0) { ++ // set error ++ if (result) { ++ *result = NULL; ++ } ++ return -1; ++ } ++ if (result) { ++ *result = Py_NewRef(default_value); ++ } ++ return 0; 
++} ++#endif ++ ++#if PY_VERSION_HEX < 0x030D00B3 ++# define Py_BEGIN_CRITICAL_SECTION(op) { ++# define Py_END_CRITICAL_SECTION() } ++# define Py_BEGIN_CRITICAL_SECTION2(a, b) { ++# define Py_END_CRITICAL_SECTION2() } ++#endif ++ ++#if PY_VERSION_HEX < 0x030E0000 && PY_VERSION_HEX >= 0x03060000 && !defined(PYPY_VERSION) ++typedef struct PyUnicodeWriter PyUnicodeWriter; ++ ++static inline void PyUnicodeWriter_Discard(PyUnicodeWriter *writer) ++{ ++ _PyUnicodeWriter_Dealloc((_PyUnicodeWriter*)writer); ++ PyMem_Free(writer); ++} ++ ++static inline PyUnicodeWriter* PyUnicodeWriter_Create(Py_ssize_t length) ++{ ++ if (length < 0) { ++ PyErr_SetString(PyExc_ValueError, ++ "length must be positive"); ++ return NULL; ++ } ++ ++ const size_t size = sizeof(_PyUnicodeWriter); ++ PyUnicodeWriter *pub_writer = (PyUnicodeWriter *)PyMem_Malloc(size); ++ if (pub_writer == _Py_NULL) { ++ PyErr_NoMemory(); ++ return _Py_NULL; ++ } ++ _PyUnicodeWriter *writer = (_PyUnicodeWriter *)pub_writer; ++ ++ _PyUnicodeWriter_Init(writer); ++ if (_PyUnicodeWriter_Prepare(writer, length, 127) < 0) { ++ PyUnicodeWriter_Discard(pub_writer); ++ return NULL; ++ } ++ writer->overallocate = 1; ++ return pub_writer; ++} ++ ++static inline PyObject* PyUnicodeWriter_Finish(PyUnicodeWriter *writer) ++{ ++ PyObject *str = _PyUnicodeWriter_Finish((_PyUnicodeWriter*)writer); ++ assert(((_PyUnicodeWriter*)writer)->buffer == NULL); ++ PyMem_Free(writer); ++ return str; ++} ++ ++static inline int ++PyUnicodeWriter_WriteChar(PyUnicodeWriter *writer, Py_UCS4 ch) ++{ ++ if (ch > 0x10ffff) { ++ PyErr_SetString(PyExc_ValueError, ++ "character must be in range(0x110000)"); ++ return -1; ++ } ++ ++ return _PyUnicodeWriter_WriteChar((_PyUnicodeWriter*)writer, ch); ++} ++ ++static inline int ++PyUnicodeWriter_WriteStr(PyUnicodeWriter *writer, PyObject *obj) ++{ ++ PyObject *str = PyObject_Str(obj); ++ if (str == NULL) { ++ return -1; ++ } ++ ++ int res = _PyUnicodeWriter_WriteStr((_PyUnicodeWriter*)writer, str); ++ Py_DECREF(str); ++ return res; ++} ++ ++static inline int ++PyUnicodeWriter_WriteRepr(PyUnicodeWriter *writer, PyObject *obj) ++{ ++ PyObject *str = PyObject_Repr(obj); ++ if (str == NULL) { ++ return -1; ++ } ++ ++ int res = _PyUnicodeWriter_WriteStr((_PyUnicodeWriter*)writer, str); ++ Py_DECREF(str); ++ return res; ++} ++ ++static inline int ++PyUnicodeWriter_WriteUTF8(PyUnicodeWriter *writer, ++ const char *str, Py_ssize_t size) ++{ ++ if (size < 0) { ++ size = (Py_ssize_t)strlen(str); ++ } ++ ++ PyObject *str_obj = PyUnicode_FromStringAndSize(str, size); ++ if (str_obj == _Py_NULL) { ++ return -1; ++ } ++ ++ int res = _PyUnicodeWriter_WriteStr((_PyUnicodeWriter*)writer, str_obj); ++ Py_DECREF(str_obj); ++ return res; ++} ++ ++static inline int ++PyUnicodeWriter_WriteWideChar(PyUnicodeWriter *writer, ++ const wchar_t *str, Py_ssize_t size) ++{ ++ if (size < 0) { ++ size = (Py_ssize_t)wcslen(str); ++ } ++ ++ PyObject *str_obj = PyUnicode_FromWideChar(str, size); ++ if (str_obj == _Py_NULL) { ++ return -1; ++ } ++ ++ int res = _PyUnicodeWriter_WriteStr((_PyUnicodeWriter*)writer, str_obj); ++ Py_DECREF(str_obj); ++ return res; ++} ++ ++static inline int ++PyUnicodeWriter_WriteSubstring(PyUnicodeWriter *writer, PyObject *str, ++ Py_ssize_t start, Py_ssize_t end) ++{ ++ if (!PyUnicode_Check(str)) { ++ PyErr_Format(PyExc_TypeError, "expect str, not %T", str); ++ return -1; ++ } ++ if (start < 0 || start > end) { ++ PyErr_Format(PyExc_ValueError, "invalid start argument"); ++ return -1; ++ } ++ if (end > PyUnicode_GET_LENGTH(str)) { ++ 
PyErr_Format(PyExc_ValueError, "invalid end argument"); ++ return -1; ++ } ++ ++ return _PyUnicodeWriter_WriteSubstring((_PyUnicodeWriter*)writer, str, ++ start, end); ++} ++ ++static inline int ++PyUnicodeWriter_Format(PyUnicodeWriter *writer, const char *format, ...) ++{ ++ va_list vargs; ++ va_start(vargs, format); ++ PyObject *str = PyUnicode_FromFormatV(format, vargs); ++ va_end(vargs); ++ if (str == _Py_NULL) { ++ return -1; ++ } ++ ++ int res = _PyUnicodeWriter_WriteStr((_PyUnicodeWriter*)writer, str); ++ Py_DECREF(str); ++ return res; ++} ++#endif // PY_VERSION_HEX < 0x030E0000 ++ ++// gh-116560 added PyLong_GetSign() to Python 3.14.0a0 ++#if PY_VERSION_HEX < 0x030E00A0 ++static inline int PyLong_GetSign(PyObject *obj, int *sign) ++{ ++ if (!PyLong_Check(obj)) { ++ PyErr_Format(PyExc_TypeError, "expect int, got %s", Py_TYPE(obj)->tp_name); ++ return -1; ++ } ++ ++ *sign = _PyLong_Sign(obj); ++ return 0; ++} ++#endif ++ ++ ++// gh-124502 added PyUnicode_Equal() to Python 3.14.0a0 ++#if PY_VERSION_HEX < 0x030E00A0 ++static inline int PyUnicode_Equal(PyObject *str1, PyObject *str2) ++{ ++ if (!PyUnicode_Check(str1)) { ++ PyErr_Format(PyExc_TypeError, "first argument must be str, not %s", ++ Py_TYPE(str1)->tp_name); ++ return -1; ++ } ++ if (!PyUnicode_Check(str2)) { ++ PyErr_Format(PyExc_TypeError, "second argument must be str, not %s", ++ Py_TYPE(str2)->tp_name); ++ return -1; ++ } ++ ++#if PY_VERSION_HEX >= 0x030d0000 && !defined(PYPY_VERSION) ++ PyAPI_FUNC(int) _PyUnicode_Equal(PyObject *str1, PyObject *str2); ++ ++ return _PyUnicode_Equal(str1, str2); ++#elif PY_VERSION_HEX >= 0x03060000 && !defined(PYPY_VERSION) ++ return _PyUnicode_EQ(str1, str2); ++#elif PY_VERSION_HEX >= 0x03090000 && defined(PYPY_VERSION) ++ return _PyUnicode_EQ(str1, str2); ++#else ++ return (PyUnicode_Compare(str1, str2) == 0); ++#endif ++} ++#endif ++ ++ ++// gh-121645 added PyBytes_Join() to Python 3.14.0a0 ++#if PY_VERSION_HEX < 0x030E00A0 ++static inline PyObject* PyBytes_Join(PyObject *sep, PyObject *iterable) ++{ ++ return _PyBytes_Join(sep, iterable); ++} ++#endif ++ ++ ++#if PY_VERSION_HEX < 0x030E00A0 ++static inline Py_hash_t Py_HashBuffer(const void *ptr, Py_ssize_t len) ++{ ++#if PY_VERSION_HEX >= 0x03000000 && !defined(PYPY_VERSION) ++ PyAPI_FUNC(Py_hash_t) _Py_HashBytes(const void *src, Py_ssize_t len); ++ ++ return _Py_HashBytes(ptr, len); ++#else ++ Py_hash_t hash; ++ PyObject *bytes = PyBytes_FromStringAndSize((const char*)ptr, len); ++ if (bytes == NULL) { ++ return -1; ++ } ++ hash = PyObject_Hash(bytes); ++ Py_DECREF(bytes); ++ return hash; ++#endif ++} ++#endif ++ ++ ++#if PY_VERSION_HEX < 0x030E00A0 ++static inline int PyIter_NextItem(PyObject *iter, PyObject **item) ++{ ++ iternextfunc tp_iternext; ++ ++ assert(iter != NULL); ++ assert(item != NULL); ++ ++ tp_iternext = Py_TYPE(iter)->tp_iternext; ++ if (tp_iternext == NULL) { ++ *item = NULL; ++ PyErr_Format(PyExc_TypeError, "expected an iterator, got '%s'", ++ Py_TYPE(iter)->tp_name); ++ return -1; ++ } ++ ++ if ((*item = tp_iternext(iter))) { ++ return 1; ++ } ++ if (!PyErr_Occurred()) { ++ return 0; ++ } ++ if (PyErr_ExceptionMatches(PyExc_StopIteration)) { ++ PyErr_Clear(); ++ return 0; ++ } ++ return -1; ++} ++#endif ++ ++ ++#if PY_VERSION_HEX < 0x030E00A0 ++static inline PyObject* PyLong_FromInt32(int32_t value) ++{ ++ Py_BUILD_ASSERT(sizeof(long) >= 4); ++ return PyLong_FromLong(value); ++} ++ ++static inline PyObject* PyLong_FromInt64(int64_t value) ++{ ++ Py_BUILD_ASSERT(sizeof(long long) >= 8); ++ return 
PyLong_FromLongLong(value); ++} ++ ++static inline PyObject* PyLong_FromUInt32(uint32_t value) ++{ ++ Py_BUILD_ASSERT(sizeof(unsigned long) >= 4); ++ return PyLong_FromUnsignedLong(value); ++} ++ ++static inline PyObject* PyLong_FromUInt64(uint64_t value) ++{ ++ Py_BUILD_ASSERT(sizeof(unsigned long long) >= 8); ++ return PyLong_FromUnsignedLongLong(value); ++} ++ ++static inline int PyLong_AsInt32(PyObject *obj, int32_t *pvalue) ++{ ++ Py_BUILD_ASSERT(sizeof(int) == 4); ++ int value = PyLong_AsInt(obj); ++ if (value == -1 && PyErr_Occurred()) { ++ return -1; ++ } ++ *pvalue = (int32_t)value; ++ return 0; ++} ++ ++static inline int PyLong_AsInt64(PyObject *obj, int64_t *pvalue) ++{ ++ Py_BUILD_ASSERT(sizeof(long long) == 8); ++ long long value = PyLong_AsLongLong(obj); ++ if (value == -1 && PyErr_Occurred()) { ++ return -1; ++ } ++ *pvalue = (int64_t)value; ++ return 0; ++} ++ ++static inline int PyLong_AsUInt32(PyObject *obj, uint32_t *pvalue) ++{ ++ Py_BUILD_ASSERT(sizeof(long) >= 4); ++ unsigned long value = PyLong_AsUnsignedLong(obj); ++ if (value == (unsigned long)-1 && PyErr_Occurred()) { ++ return -1; ++ } ++#if SIZEOF_LONG > 4 ++ if ((unsigned long)UINT32_MAX < value) { ++ PyErr_SetString(PyExc_OverflowError, ++ "Python int too large to convert to C uint32_t"); ++ return -1; ++ } ++#endif ++ *pvalue = (uint32_t)value; ++ return 0; ++} ++ ++static inline int PyLong_AsUInt64(PyObject *obj, uint64_t *pvalue) ++{ ++ Py_BUILD_ASSERT(sizeof(long long) == 8); ++ unsigned long long value = PyLong_AsUnsignedLongLong(obj); ++ if (value == (unsigned long long)-1 && PyErr_Occurred()) { ++ return -1; ++ } ++ *pvalue = (uint64_t)value; ++ return 0; ++} ++#endif ++ ++ ++#ifdef __cplusplus ++} ++#endif ++#endif // PYTHONCAPI_COMPAT diff --git a/python-numba.changes b/python-numba.changes index 9395517..eadcd3f 100644 --- a/python-numba.changes +++ b/python-numba.changes @@ -1,3 +1,8 @@ +------------------------------------------------------------------- +Fri Nov 22 09:42:21 UTC 2024 - Markéta Machová + +- Add upstream py313.patch to support Python 3.13 + ------------------------------------------------------------------- Tue Oct 29 20:01:54 UTC 2024 - Dirk Müller diff --git a/python-numba.spec b/python-numba.spec index 36a3f7f..8018c8e 100644 --- a/python-numba.spec +++ b/python-numba.spec @@ -56,8 +56,6 @@ ExclusiveArch: donotbuild ExcludeArch: s390x ppc64 %ix86 %arm %endif %endif -# not supported with 0.60.0 -%global skip_python313 1 Name: python-numba%{?psuffix} Version: 0.60.0 Release: 0 @@ -70,6 +68,8 @@ Source: https://files.pythonhosted.org/packages/source/n/numba/numba-%{v Patch3: skip-failing-tests.patch # PATCH-FIX-UPSTREAM https://github.com/numba/numba/pull/9741 Add Support for NumPy 2.1 Patch4: numpy21.patch +# PATCH-FIX-UPSTREAM https://github.com/numba/numba/pull/9682 Python 3.13 support +Patch5: py313.patch BuildRequires: %{python_module devel >= 3.9} BuildRequires: %{python_module numpy-devel >= %{min_numpy_ver} with %python-numpy-devel < %{max_numpy_ver}} BuildRequires: %{python_module pip}